#!/usr/bin/env python3
import importlib
import os
import traceback

import pybullet_data
import rclpy
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from std_srvs.srv import Empty


class pyBulletRosWrapper(Node):
    """ROS wrapper class for the pybullet simulator"""

    def __init__(self):
        super().__init__('pybullet_ros', automatically_declare_parameters_from_overrides=True)
        self.executor = MultiThreadedExecutor()
        # import pybullet
        self.pb = importlib.import_module('pybullet')
        # get from param server the frequency at which to run the simulation
        self.loop_rate = self.get_parameter('loop_rate').value
        self.get_logger().info('Loop rate: {}'.format(self.loop_rate))
        # query from param server if gui is needed
        is_gui_needed = self.get_parameter('pybullet_gui').value
        # get from param server if user wants to pause simulation at startup
        self.pause_simulation = self.get_parameter('pause_simulation').value
        print('\033[34m')  # print pybullet stuff in blue
        physicsClient = self.start_gui(gui=is_gui_needed)  # we don't need to store the physics client for now...
        # setup service to restart simulation
        self.create_service(Empty, 'reset_simulation', self.handle_reset_simulation)
        # setup services for pausing/unpausing simulation
        self.create_service(Empty, 'pause_physics', self.handle_pause_physics)
        self.create_service(Empty, 'unpause_physics', self.handle_unpause_physics)
        # get pybullet path in your system and store it internally for future use, e.g. to set the floor
        self.pb.setAdditionalSearchPath(pybullet_data.getDataPath())
        # create object of environment class for later use
        env_plugin = self.get_parameter('environment').value  # default: plugins/environment.py
        plugin_import_prefix = self.get_parameter('plugin_import_prefix').value
        self.environment = getattr(importlib.import_module(f'{plugin_import_prefix}.{env_plugin}'), 'Environment')(self)
        # load robot URDF model, set gravity, and ground plane
        self.robot = self.init_pybullet_robot()
        self.connected_to_physics_server = None
        if not self.robot:
            self.connected_to_physics_server = False
            return  # error while loading urdf file
        else:
            self.connected_to_physics_server = True
        # get all joint names and their pybullet indices
        rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic = self.get_properties()
        # import plugins dynamically
        self.plugins = []
        plugins = self.get_parameter('plugins').value
        if not plugins:
            self.get_logger().warn('No plugins found, forgot to set param plugins?')
        # return to normal shell color
        print('\033[0m')
        # load plugins
        for plugin in plugins:
            module_, class_ = plugin.split(':')
            params_ = {'module': module_, 'class': class_}
            self.get_logger().info('loading plugin: {} class from {}'.format(class_, module_))
            # create an object of the imported plugin class
            obj = getattr(importlib.import_module(module_), class_)(self.pb,
                                                                    self.robot,
                                                                    rev_joints=rev_joint_index_name_dic,
                                                                    prism_joints=prismatic_joint_index_name_dic,
                                                                    fixed_joints=fixed_joint_index_name_dic,
                                                                    link_ids=link_names_to_ids_dic,
                                                                    **params_)
            # store objects in member variable for future use
            self.plugins.append(obj)
            self.executor.add_node(obj)
        self.get_logger().info('pybullet ROS wrapper initialized')
        self.timer = self.create_timer(1.0 / self.loop_rate, self.wrapper_callback)
        self.executor.add_node(self)
        try:
            self.executor.spin()
        except Exception:
            self.get_logger().error(traceback.format_exc())
        finally:
            self.executor.shutdown()
            self.destroy_node()
            for node in self.plugins:
                node.destroy_node()

    def wrapper_callback(self):
        # do not step the simulation while it is paused
        if not self.pause_simulation:
            self.pb.stepSimulation()
        if not self.connected_to_physics_server:
            self.pb.disconnect()

    def get_properties(self):
        """construct 4 dictionaries:
        - joint index to joint name x3 (revolute, prismatic and fixed joints)
        - link name to link index
        """
        rev_joint_index_name_dic = {}
        fixed_joint_index_name_dic = {}
        prismatic_joint_index_name_dic = {}
        link_names_to_ids_dic = {}
        for joint_index in range(0, self.pb.getNumJoints(self.robot)):
            info = self.pb.getJointInfo(self.robot, joint_index)
            # build a dictionary of link names to ids
            link_names_to_ids_dic[info[12].decode('utf-8')] = joint_index
            # classify the joint by its type; info[1] refers to the joint name
            if info[2] == self.pb.JOINT_REVOLUTE:
                rev_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
            elif info[2] == self.pb.JOINT_FIXED:
                fixed_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
            elif info[2] == self.pb.JOINT_PRISMATIC:
                prismatic_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
        return rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic

    def start_gui(self, gui=True):
        """start physics engine (client) with or without gui"""
        if gui:
            # start simulation with gui
            self.get_logger().info('Running pybullet with gui')
            self.get_logger().info('-------------------------')
            gui_options = self.get_parameter('gui_options').value  # e.g. to maximize screen: options="--width=2560 --height=1440"
            return self.pb.connect(self.pb.GUI, options=gui_options)
        else:
            # start simulation without gui (non-graphical version)
            self.get_logger().info('Running pybullet without gui')
            self.get_logger().info('-------------------------')
            return self.pb.connect(self.pb.DIRECT)

    def init_pybullet_robot(self):
        """load robot URDF model, set gravity, ground plane and environment"""
        # get from param server the path to the URDF robot model to load at startup
        urdf_path = self.get_parameter('robot_urdf_path').value
        if urdf_path is None:
            self.get_logger().warn('mandatory param robot_urdf_path not set, will exit now')
            rclpy.shutdown()
        # test urdf file existence
        if not os.path.isfile(urdf_path):
            self.get_logger().error('param robot_urdf_path is set, but file does not exist: ' + urdf_path)
            rclpy.shutdown()
            return None
        # if the model is a xacro file, generate a plain urdf version of it first
        if 'xacro' in urdf_path:
            # remove the .xacro suffix from the name
            urdf_path_without_xacro = urdf_path[0:urdf_path.find('.xacro')] + urdf_path[urdf_path.find('.xacro') + len('.xacro'):]
            os.system(f'xacro {urdf_path} -o {urdf_path_without_xacro}')
            urdf_path = urdf_path_without_xacro
        # get robot spawn pose from parameter server
        robot_pose_x = self.get_parameter('robot_pose_x').value
        robot_pose_y = self.get_parameter('robot_pose_y').value
        robot_pose_z = self.get_parameter('robot_pose_z').value
        robot_pose_yaw = self.get_parameter('robot_pose_yaw').value
        robot_spawn_orientation = self.pb.getQuaternionFromEuler([0.0, 0.0, robot_pose_yaw])
        fixed_base = self.get_parameter('fixed_base').value
        # load robot from URDF model; the user decides whether inertia is computed
        # automatically by pybullet or taken from the file
        if self.get_parameter('use_inertia_from_file').value:
            # combining several boolean flags using bitwise "or" according to the pybullet documentation
            urdf_flags = self.pb.URDF_USE_INERTIA_FROM_FILE | self.pb.URDF_USE_SELF_COLLISION
        else:
            urdf_flags = self.pb.URDF_USE_SELF_COLLISION
        # load environment
        self.get_logger().info('loading environment')
        self.environment.load_environment()
        # disable realtime simulation; NOTE: there is no need to call stepSimulation if
        # setRealTimeSimulation is set to 1, but that mode does not currently work with
        # the effort controller, which is why it is left at 0
        self.pb.setRealTimeSimulation(0)
        self.get_logger().info('loading urdf model: ' + urdf_path)
        # NOTE: self collision is enabled by default
        return self.pb.loadURDF(urdf_path,
                                basePosition=[robot_pose_x, robot_pose_y, robot_pose_z],
                                baseOrientation=robot_spawn_orientation,
                                useFixedBase=fixed_base,
                                flags=urdf_flags)

    def handle_reset_simulation(self, req, resp):
        """Callback to handle the service offered by this node to reset the simulation"""
        self.get_logger().info('resetting simulation now')
        # pause simulation to prevent reading joint values with an empty world
        self.pause_simulation = True
        # remove all objects from the world and reset the world to initial conditions
        self.pb.resetSimulation()
        # load URDF model again, set gravity and floor
        self.init_pybullet_robot()
        # resume simulation control cycle now that a new robot is in place
        self.pause_simulation = False
        return resp

    def handle_pause_physics(self, req, resp):
        """pause simulation: raise flag to prevent pybullet from executing self.pb.stepSimulation()"""
        self.get_logger().info('pausing simulation')
        self.pause_simulation = True
        return resp

    def handle_unpause_physics(self, req, resp):
        """unpause simulation: lower flag to allow pybullet to execute self.pb.stepSimulation()"""
        self.get_logger().info('unpausing simulation')
        self.pause_simulation = False
        return resp

    def pause_simulation_function(self):
        return self.pause_simulation


def main():
    try:
        rclpy.init()
        pyBulletRosWrapper()
    finally:
        rclpy.shutdown()


if __name__ == '__main__':
    main()
pybullet_ros/pybullet_ros_wrapper.py
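For orientation, a minimal sketch (not part of the package) of how another ROS 2 node could call the pause service the wrapper advertises; only the service name and type come from the code above, the node name is arbitrary:

import rclpy
from rclpy.node import Node
from std_srvs.srv import Empty

rclpy.init()
node = Node('pause_caller')  # hypothetical caller node
client = node.create_client(Empty, 'pause_physics')
if client.wait_for_service(timeout_sec=5.0):
    # Empty services carry no payload; the call itself is the signal.
    future = client.call_async(Empty.Request())
    rclpy.spin_until_future_complete(node, future)
    node.get_logger().info('simulation paused')
node.destroy_node()
rclpy.shutdown()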
# Given a binary search tree (BST), find the lowest common ancestor (LCA) of two given nodes in the BST.
#
# According to the definition of LCA on Wikipedia: "The lowest common ancestor is defined between two
# nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to
# be a descendant of itself)."
#
# Example 1:
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
# Output: 6
# Explanation: The LCA of nodes 2 and 8 is 6.
#
# Example 2:
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
# Output: 2
# Explanation: The LCA of nodes 2 and 4 is 2, since a node can be a descendant of itself according to
# the LCA definition.
#
# Example 3:
#
# Input: root = [2,1], p = 2, q = 1
# Output: 2
#
# Constraints:
#
# The number of nodes in the tree is in the range [2, 10^5].
# -10^9 <= Node.val <= 10^9
# All Node.val are unique.
# p != q
# p and q will exist in the BST.

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # In a binary search tree, the LCA is the first node whose value
        # separates p and q: recurse left while both are smaller, right
        # while both are larger, and stop otherwise.
        if p.val < root.val and q.val < root.val:
            return self.lowestCommonAncestor(root.left, p, q)
        if p.val > root.val and q.val > root.val:
            return self.lowestCommonAncestor(root.right, p, q)
        return root
0235-lowest-common-ancestor-of-a-binary-search-tree/lowest-common-ancestor-of-a-binary-search-tree.py
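Since the recursion above only ever follows one child, the same idea can be written iteratively in O(1) extra space; here is a self-contained sketch with a tiny hand-built tree for illustration (the TreeNode class mirrors the commented-out definition in the solution):

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def lowest_common_ancestor(root, p, q):
    # Walk down from the root; the first node that does not send
    # both targets to the same side is the LCA.
    while root:
        if p.val < root.val and q.val < root.val:
            root = root.left
        elif p.val > root.val and q.val > root.val:
            root = root.right
        else:
            return root

# Tiny example tree:   6
#                     / \
#                    2   8
root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)
print(lowest_common_ancestor(root, root.left, root.right).val)  # 6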
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings

import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables


class AnalyticsConfiguration(pulumi.CustomResource):
    bucket: pulumi.Output[str]
    """
    The name of the bucket this analytics configuration is associated with.
    """
    filter: pulumi.Output[dict]
    """
    Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).

      * `prefix` (`str`) - Object prefix for filtering.
      * `tags` (`dict`) - Set of object tags for filtering.
    """
    name: pulumi.Output[str]
    """
    Unique identifier of the analytics configuration for the bucket.
    """
    storage_class_analysis: pulumi.Output[dict]
    """
    Configuration for the analytics data export (documented below).

      * `dataExport` (`dict`) - Data export configuration (documented below).
        * `destination` (`dict`) - Specifies the destination for the exported analytics data (documented below).
          * `s3BucketDestination` (`dict`) - Analytics data export currently only supports an S3 bucket destination (documented below).
            * `bucketAccountId` (`str`) - The account ID that owns the destination bucket.
            * `bucketArn` (`str`) - The ARN of the destination bucket.
            * `format` (`str`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
            * `prefix` (`str`) - Object prefix for filtering.
        * `outputSchemaVersion` (`str`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
    """
    def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.

        ## Example Usage

        ### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.s3.Bucket("example")
        analytics = aws.s3.Bucket("analytics")
        example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket",
            bucket=example.bucket,
            storage_class_analysis={
                "dataExport": {
                    "destination": {
                        "s3BucketDestination": {
                            "bucketArn": analytics.arn,
                        },
                    },
                },
            })
        ```

        ### Add analytics configuration with S3 bucket object filter

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.s3.Bucket("example")
        example_filtered = aws.s3.AnalyticsConfiguration("example-filtered",
            bucket=example.bucket,
            filter={
                "prefix": "documents/",
                "tags": {
                    "priority": "high",
                    "class": "blue",
                },
            })
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
        :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
        :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
        :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).

        The **filter** object supports the following:

          * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
          * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.

        The **storage_class_analysis** object supports the following:

          * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
            * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
              * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
                * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
                * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
                * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
                * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
            * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
        """
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if bucket is None:
                raise TypeError("Missing required property 'bucket'")
            __props__['bucket'] = bucket
            __props__['filter'] = filter
            __props__['name'] = name
            __props__['storage_class_analysis'] = storage_class_analysis
        super(AnalyticsConfiguration, __self__).__init__(
            'aws:s3/analyticsConfiguration:AnalyticsConfiguration',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None):
        """
        Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
        :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
        :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
        :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).

        The **filter** object supports the following:

          * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
          * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.

        The **storage_class_analysis** object supports the following:

          * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
            * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
              * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
                * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
                * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
                * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
                * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
            * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["bucket"] = bucket
        __props__["filter"] = filter
        __props__["name"] = name
        __props__["storage_class_analysis"] = storage_class_analysis
        return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
sdk/python/pulumi_aws/s3/analytics_configuration.py
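An illustrative usage note (not from the generated file): besides creating the resource as in the docstring examples, a Pulumi program can export the resource's provider-assigned outputs for inspection after `pulumi up`:

import pulumi
import pulumi_aws as aws

example = aws.s3.Bucket("example")
config = aws.s3.AnalyticsConfiguration("example-config", bucket=example.bucket)
# Export the provider-assigned id of the analytics configuration.
pulumi.export("analytics_config_id", config.id)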
import asyncio
from asyncio import Queue

import irsdk

from TelemetryDataUtils import getInfo, getGeneral
from models import typed_message_pb2
from models.State import State


class TelemetryLogger:

    def __init__(self, receiver_queue: Queue, pushable_queue: Queue, streaming_queue: Queue):
        self.state = State()
        self.ir = irsdk.IRSDK()
        self.should_run = False
        self.receiver_queue = receiver_queue
        self.pushable_queue = pushable_queue
        self.streaming_queue = streaming_queue
        self.sentGeneralData = False

    async def run(self):
        # TODO: Fix this
        while True:
            self.should_run = await self.receiver_queue.get()
            if self.should_run:
                break
        while self.should_run:
            self.is_sim_running()
            if self.state.ir_connected:
                await self.get_iracing_data()
            await asyncio.sleep(5)
            # await asyncio.sleep(0.1)
        # TODO: Figure out how to shut off if told to

    def is_sim_running(self):
        if self.state.ir_connected and not (self.ir.is_initialized and self.ir.is_connected):
            self.state.ir_connected = False
            # don't forget to reset your State variables
            self.state.last_car_setup_tick = -1
            # we are shutting down the ir library (clearing all internal variables)
            self.ir.shutdown()
            print("irsdk disconnected")
        elif not self.state.ir_connected and self.ir.startup(
                test_file="./data/data.bin") and self.ir.is_initialized and self.ir.is_connected:
            self.state.ir_connected = True
            print("irsdk connected")

    async def get_iracing_data(self):
        # freeze data per tick since data can change midway
        self.ir.freeze_var_buffer_latest()

        streamable: typed_message_pb2.TypedMessage = typed_message_pb2.TypedMessage()
        streamable.type = typed_message_pb2.TypedMessage.Type.LOGGER_STREAM
        streamable.message.Pack(getInfo(ir=self.ir))

        pushable: typed_message_pb2.TypedMessage = typed_message_pb2.TypedMessage()
        pushable.type = typed_message_pb2.TypedMessage.Type.LOGGER_UPDATE
        pushable.message.Pack(getGeneral(self.ir))

        if not self.sentGeneralData:
            await self.streaming_queue.put(pushable.SerializeToString())
            self.sentGeneralData = True
        await self.streaming_queue.put(streamable.SerializeToString())
logger/TelemetryLogger.py
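Both TODOs above concern starting and stopping the polling loop cleanly. One common pattern is an asyncio.Event used as an on/off switch; a self-contained sketch (independent of irsdk and the protobuf models, with the print standing in for get_iracing_data):

import asyncio

async def poll_loop(stop: asyncio.Event, interval: float = 0.1):
    # Run one tick at a time until the stop event is set.
    while not stop.is_set():
        print("tick")  # stand-in for get_iracing_data()
        await asyncio.sleep(interval)

async def main():
    stop = asyncio.Event()
    task = asyncio.create_task(poll_loop(stop))
    await asyncio.sleep(0.35)   # let it run for a few ticks
    stop.set()                  # signal shutdown instead of cancelling the task
    await task

asyncio.run(main())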
import os
import shutil

from git import Repo, RemoteProgress
from tqdm import tqdm

dir_path = os.path.join(os.path.expanduser('~/Documents'), "server")
os.chdir(dir_path)
gitaddress = "https://github.com/0xol/server"

print("what server version would you like to install")
print("format is 'client-version'")
print("example 'forge-1.16.5' or 'vanilla-1.7.10'")
print("for lists of supported server versions check https://github.com/0xol/server and check under branches")
branch = input()

os.system("del /F /S /Q /A .git")
os.system("del /F /S /Q /A .git")  # just in case the program didn't kill it the first time

# clear out anything left in the server directory
folder = dir_path
for filename in os.listdir(folder):
    file_path = os.path.join(folder, filename)
    try:
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    except Exception as e:
        print('Failed to delete %s. Reason: %s' % (file_path, e))


class CloneProgress(RemoteProgress):
    def __init__(self):
        super().__init__()
        self.pbar = tqdm()

    def update(self, op_code, cur_count, max_count=None, message=''):
        self.pbar.total = max_count
        self.pbar.n = cur_count
        self.pbar.refresh()


print(dir_path)
Repo.clone_from(gitaddress, dir_path, branch=branch, progress=CloneProgress())
main.py
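The doubled `del /F /S /Q /A .git` shell call appears to work around read-only `.git` objects on Windows; a portable sketch of the same cleanup (an assumption about the intent, not code from the source) uses shutil.rmtree with an error handler that clears the read-only flag and retries:

import os
import shutil
import stat

def _force_remove(func, path, exc_info):
    # .git object files are read-only on Windows; clear the flag and retry.
    os.chmod(path, stat.S_IWRITE)
    func(path)

# Portable equivalent of the two `del /F /S /Q /A .git` calls:
if os.path.isdir(".git"):
    shutil.rmtree(".git", onerror=_force_remove)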
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#

from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_scatter03.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})

        chart.axis_ids = [54010624, 45705856]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
                          'values': '=Sheet1!$B$1:$B$5'})

        chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
                          'values': '=Sheet1!$C$1:$C$5'})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
xlsxwriter/test/comparison/test_chart_scatter03.py
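The test drives XlsxWriter's internal Workbook class through a comparison harness; the same straight-line scatter chart can be produced with the public API in a standalone script (an illustrative sketch, with an arbitrary output filename):

import xlsxwriter

workbook = xlsxwriter.Workbook('scatter_demo.xlsx')  # arbitrary filename
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})

data = [[1, 2, 3, 4, 5], [2, 4, 6, 8, 10]]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])

chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
                  'values': '=Sheet1!$B$1:$B$5'})
worksheet.insert_chart('E2', chart)
workbook.close()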
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithmic data generators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import shutil

import numpy as np
from six.moves import range  # pylint: disable=redefined-builtin

from tensor2tensor.data_generators import generator_utils as utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry

import tensorflow as tf


class AlgorithmicProblem(problem.Problem):
  """Base class for algorithmic problems."""

  @property
  def num_symbols(self):
    raise NotImplementedError()

  def generator(self, nbr_symbols, max_length, nbr_cases):
    """Generates the data."""
    raise NotImplementedError()

  @property
  def train_length(self):
    return 40

  @property
  def dev_length(self):
    return 400

  @property
  def train_size(self):
    return 100000

  @property
  def dev_size(self):
    return 10000

  @property
  def num_shards(self):
    return 10

  def generate_data(self, data_dir, _, task_id=-1):

    def generator_eos(nbr_symbols, max_length, nbr_cases):
      """Shift by NUM_RESERVED_IDS and append EOS token."""
      for case in self.generator(nbr_symbols, max_length, nbr_cases):
        new_case = {}
        for feature in case:
          new_case[feature] = [
              i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature]
          ] + [text_encoder.EOS_ID]
        yield new_case

    utils.generate_dataset_and_shuffle(
        generator_eos(self.num_symbols, self.train_length, self.train_size),
        self.training_filepaths(data_dir, self.num_shards, shuffled=True),
        generator_eos(self.num_symbols, self.dev_length, self.dev_size),
        self.dev_filepaths(data_dir, 1, shuffled=True),
        shuffle=False)

  def hparams(self, defaults, unused_model_hparams):
    p = defaults
    vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS
    p.modality = {"inputs": modalities.ModalityType.SYMBOL,
                  "targets": modalities.ModalityType.SYMBOL}
    p.vocab_size = {"inputs": vocab_size,
                    "targets": vocab_size}
    p.input_space_id = problem.SpaceID.DIGIT_0
    p.target_space_id = problem.SpaceID.DIGIT_1


@registry.register_problem
class AlgorithmicIdentityBinary40(AlgorithmicProblem):
  """Problem spec for algorithmic binary identity task."""

  @property
  def num_symbols(self):
    return 2

  def generator(self, nbr_symbols, max_length, nbr_cases):
    """Generator for the identity (copy) task on sequences of symbols.

    The length of the sequence is drawn uniformly at random from
    [1, max_length] and then symbols are drawn uniformly at random from
    [0, nbr_symbols) until nbr_cases sequences have been produced.

    Args:
      nbr_symbols: number of symbols to use in each sequence.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      input-list and target-list are the same.
    """
    for _ in range(nbr_cases):
      l = np.random.randint(max_length) + 1
      inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
      yield {"inputs": inputs, "targets": inputs}


@registry.register_problem
class AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40):
  """Problem spec for algorithmic decimal identity task."""

  @property
  def num_symbols(self):
    return 10


@registry.register_problem
class AlgorithmicShiftDecimal40(AlgorithmicProblem):
  """Problem spec for algorithmic decimal shift task."""

  @property
  def num_symbols(self):
    return 20

  def generator(self, nbr_symbols, max_length, nbr_cases):
    """Generator for the shift task on sequences of symbols.

    The length of the sequence is drawn uniformly at random from
    [1, max_length] and then symbols are drawn uniformly at random from
    [0, nbr_symbols - shift] until nbr_cases sequences have been produced
    (output[i] = input[i] + shift).

    Args:
      nbr_symbols: number of symbols to use in each sequence (input + output).
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      target-list[i] = input-list[i] + shift.
    """
    shift = 10
    for _ in range(nbr_cases):
      l = np.random.randint(max_length) + 1
      inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)]
      yield {"inputs": inputs, "targets": [i + shift for i in inputs]}

  @property
  def dev_length(self):
    return 80


@registry.register_problem
class AlgorithmicReverseBinary40(AlgorithmicProblem):
  """Problem spec for algorithmic binary reversing task."""

  @property
  def num_symbols(self):
    return 2

  def generator(self, nbr_symbols, max_length, nbr_cases):
    """Generator for the reversing task on sequences of symbols.

    The length of the sequence is drawn uniformly at random from
    [1, max_length] and then symbols are drawn uniformly at random from
    [0, nbr_symbols) until nbr_cases sequences have been produced.

    Args:
      nbr_symbols: number of symbols to use in each sequence.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      target-list is input-list reversed.
    """
    for _ in range(nbr_cases):
      l = np.random.randint(max_length) + 1
      inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
      yield {"inputs": inputs, "targets": list(reversed(inputs))}


@registry.register_problem
class AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40):
  """Problem spec for algorithmic decimal reversing task."""

  @property
  def num_symbols(self):
    return 10


def zipf_distribution(nbr_symbols, alpha):
  """Helper function: Create a Zipf distribution.

  Args:
    nbr_symbols: number of symbols to use in the distribution.
    alpha: float, Zipf's Law Distribution parameter. Default = 1.5. Usually
      for modelling natural text distribution is in the range [1.1-1.6].

  Returns:
    distr_map: list of float, Zipf's distribution over nbr_symbols.
  """
  tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
  zeta = np.r_[0.0, np.cumsum(tmp)]
  return [x / zeta[-1] for x in zeta]


def zipf_random_sample(distr_map, sample_len):
  """Helper function: Generate a random Zipf sample of given length.

  Args:
    distr_map: list of float, Zipf's distribution over nbr_symbols.
    sample_len: integer, length of sequence to generate.

  Returns:
    sample: list of integer, Zipf's random sample over nbr_symbols.
  """
  u = np.random.random(sample_len)
  # Random produces values in range [0.0, 1.0); even if it is almost
  # improbable (but possible) that it can generate a clear 0.000..0.
  return list(np.searchsorted(distr_map, u))


def reverse_generator_nlplike(nbr_symbols,
                              max_length,
                              nbr_cases,
                              scale_std_dev=100,
                              alpha=1.5):
  """Generator for the reversing nlp-like task on sequences of symbols.

  The length of the sequence is drawn from a Gaussian (normal) distribution
  at random from [1, max_length] and with std deviation of 1%, then symbols
  are drawn from Zipf's law at random from [0, nbr_symbols) until nbr_cases
  sequences have been produced.

  Args:
    nbr_symbols: integer, number of symbols.
    max_length: integer, maximum length of sequences to generate.
    nbr_cases: the number of cases to generate.
    scale_std_dev: float, Normal distribution's standard deviation scale
      factor used to draw the length of sequence. Default = 1% of the
      max_length.
    alpha: float, Zipf's Law Distribution parameter. Default = 1.5. Usually
      for modelling natural text distribution is in the range [1.1-1.6].

  Yields:
    A dictionary {"inputs": input-list, "targets": target-list} where
    target-list is input-list reversed.
  """
  std_dev = max_length / scale_std_dev
  distr_map = zipf_distribution(nbr_symbols, alpha)
  for _ in range(nbr_cases):
    l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1)
    inputs = zipf_random_sample(distr_map, l)
    yield {"inputs": inputs, "targets": list(reversed(inputs))}


@registry.register_problem
class AlgorithmicReverseNlplike8k(AlgorithmicProblem):
  """Problem spec for algorithmic nlp-like reversing task."""

  @property
  def num_symbols(self):
    return 8000

  def generator(self, nbr_symbols, max_length, nbr_cases):
    return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases,
                                     10, 1.300)

  @property
  def train_length(self):
    return 70

  @property
  def dev_length(self):
    return 70


@registry.register_problem
class AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k):
  """Problem spec for algorithmic nlp-like reversing task, 32k vocab."""

  @property
  def num_symbols(self):
    return 32000

  def generator(self, nbr_symbols, max_length, nbr_cases):
    return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases,
                                     10, 1.050)


def lower_endian_to_number(l, base):
  """Helper function: convert a list of digits in the given base to a number."""
  return sum([d * (base**i) for i, d in enumerate(l)])


def number_to_lower_endian(n, base):
  """Helper function: convert a number to a list of digits in the given base."""
  if n < base:
    return [n]
  return [n % base] + number_to_lower_endian(n // base, base)


def random_number_lower_endian(length, base):
  """Helper function: generate a random number as a lower-endian digits list."""
  if length == 1:  # Last digit can be 0 only if length is 1.
    return [np.random.randint(base)]
  prefix = [np.random.randint(base) for _ in range(length - 1)]
  return prefix + [np.random.randint(base - 1) + 1]  # Last digit is not 0.


@registry.register_problem
class AlgorithmicAdditionBinary40(AlgorithmicProblem):
  """Problem spec for algorithmic binary addition task."""

  @property
  def num_symbols(self):
    return 2

  def generator(self, base, max_length, nbr_cases):  # pylint: disable=arguments-differ
    """Generator for the addition task.

    The length of each number is drawn uniformly at random in
    [1, max_length/2] and then digits are drawn uniformly at random. The
    numbers are added and separated by [base] in the input. Stops at
    nbr_cases.

    Args:
      base: in which base are the numbers.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      input-list are the 2 numbers and target-list is the result of adding
      them.

    Raises:
      ValueError: if max_length is lower than 3.
    """
    if max_length < 3:
      raise ValueError("Maximum length must be at least 3.")
    for _ in range(nbr_cases):
      l1 = np.random.randint(max_length // 2) + 1
      l2 = np.random.randint(max_length - l1 - 1) + 1
      n1 = random_number_lower_endian(l1, base)
      n2 = random_number_lower_endian(l2, base)
      result = lower_endian_to_number(n1, base) + lower_endian_to_number(
          n2, base)
      inputs = n1 + [base] + n2
      targets = number_to_lower_endian(result, base)
      yield {"inputs": inputs, "targets": targets}


@registry.register_problem
class AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40):
  """Problem spec for algorithmic decimal addition task."""

  @property
  def num_symbols(self):
    return 10


@registry.register_problem
class AlgorithmicMultiplicationBinary40(AlgorithmicProblem):
  """Problem spec for algorithmic binary multiplication task."""

  @property
  def num_symbols(self):
    return 2

  def generator(self, base, max_length, nbr_cases):  # pylint: disable=arguments-differ
    """Generator for the multiplication task.

    The length of each number is drawn uniformly at random in
    [1, max_length/2] and then digits are drawn uniformly at random. The
    numbers are multiplied and separated by [base] in the input. Stops at
    nbr_cases.

    Args:
      base: in which base are the numbers.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      input-list are the 2 numbers and target-list is the result of
      multiplying them.

    Raises:
      ValueError: if max_length is lower than 3.
    """
    if max_length < 3:
      raise ValueError("Maximum length must be at least 3.")
    for _ in range(nbr_cases):
      l1 = np.random.randint(max_length // 2) + 1
      l2 = np.random.randint(max_length - l1 - 1) + 1
      n1 = random_number_lower_endian(l1, base)
      n2 = random_number_lower_endian(l2, base)
      result = lower_endian_to_number(n1, base) * lower_endian_to_number(
          n2, base)
      inputs = n1 + [base] + n2
      targets = number_to_lower_endian(result, base)
      yield {"inputs": inputs, "targets": targets}


@registry.register_problem
class AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40):
  """Problem spec for algorithmic decimal multiplication task."""

  @property
  def num_symbols(self):
    return 10


@registry.register_problem
class AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40):
  """Test Problem with tiny dataset."""

  @property
  def train_length(self):
    return 10

  @property
  def dev_length(self):
    return 10

  @property
  def train_size(self):
    return 1000

  @property
  def dev_size(self):
    return 100

  @property
  def num_shards(self):
    return 1


@registry.register_problem
class AlgorithmicSortProblem(AlgorithmicProblem):
  """Problem spec for sorting numbers."""

  @property
  def num_symbols(self):
    return max(self.train_length, self.dev_length)

  @property
  def train_length(self):
    return 10

  @property
  def dev_length(self):
    return self.train_length * 2

  @property
  def unique(self):
    """Unique numbers wo/ replacement or w/ replacement in sorting task."""
    return False

  def generator(self, nbr_symbols, max_length, nbr_cases):
    """Generator for the sorting task on sequences of symbols.

    The length of the sequence is drawn uniformly at random from
    [1, max_length] and then symbols are drawn (uniquely w/ or w/o
    replacement) uniformly at random from [0, nbr_symbols) until nbr_cases
    sequences have been produced.

    Args:
      nbr_symbols: number of symbols to use in each sequence.
      max_length: integer, maximum length of sequences to generate.
      nbr_cases: the number of cases to generate.

    Yields:
      A dictionary {"inputs": input-list, "targets": target-list} where
      target-list is input-list sorted.
    """
    for _ in range(nbr_cases):
      # Sample the sequence length.
      length = np.random.randint(max_length) + 1

      if self.unique:
        # Sample our inputs w/o replacement.
        inputs = np.arange(nbr_symbols)
        np.random.shuffle(inputs)

        # Truncate to the desired length.
        inputs = inputs[:length]
        inputs = list(inputs)
      else:
        inputs = list(np.random.randint(nbr_symbols, size=length))

      # Targets are simply the sorted inputs.
      targets = list(sorted(inputs))

      yield {"inputs": inputs, "targets": targets}

  def eval_metrics(self):
    defaults = super(AlgorithmicSortProblem, self).eval_metrics()
    return defaults + [metrics.Metrics.EDIT_DISTANCE]


@registry.register_problem
class TinyAlgo(AlgorithmicIdentityBinary40):
  """A small algorithmic problem for testing."""

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    """Generate data for this problem."""
    del tmp_dir, task_id
    identity_problem = AlgorithmicIdentityBinary40()
    utils.generate_files(
        identity_problem.generator(self.num_symbols, 40, 100000),
        self.training_filepaths(data_dir, 1, shuffled=True), 100)
    utils.generate_files(
        identity_problem.generator(self.num_symbols, 400, 10000),
        self.dev_filepaths(data_dir, 1, shuffled=True), 100)

  @classmethod
  def setup_for_test(cls):
    """Setup directories and files required to run the problem."""
    tmp_dir = tf.test.get_temp_dir()
    shutil.rmtree(tmp_dir)
    os.mkdir(tmp_dir)
    cls.data_dir = tmp_dir

    # Generate a small test dataset.
    cls().generate_data(TinyAlgo.data_dir, None)
tensor2tensor/data_generators/algorithmic.py
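As a quick illustration of the lower-endian helpers used by the addition and multiplication tasks, here are standalone copies of the two conversion functions with a round-trip check (duplicated so the snippet runs without tensor2tensor installed):

def lower_endian_to_number(l, base):
    # Digits are stored least-significant first.
    return sum(d * (base ** i) for i, d in enumerate(l))

def number_to_lower_endian(n, base):
    if n < base:
        return [n]
    return [n % base] + number_to_lower_endian(n // base, base)

n = 2017
digits = number_to_lower_endian(n, 10)
print(digits)                              # [7, 1, 0, 2]
print(lower_endian_to_number(digits, 10))  # 2017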
from flask import Flask

app = Flask(__name__, static_url_path='', static_folder='static')
app.config['DEBUG'] = True


@app.route('/')
def root():
    # Note: this is probably handled by the app engine static file handler.
    return app.send_static_file('index.html')


@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    return 'Sorry, nothing at this URL.', 404
main.py
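The 404 handler can be exercised without deploying anything by using Flask's built-in test client; a small sketch, assuming the module above is importable as `main`:

from main import app  # assumes the module above is on the import path

client = app.test_client()
response = client.get('/no-such-page')
print(response.status_code)              # 404
print(response.get_data(as_text=True))   # 'Sorry, nothing at this URL.'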
""" Generating data from the CarRacing gym environment. !!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!! """ import argparse from os.path import join, exists import gym import numpy as np from utils.misc import sample_continuous_policy def generate_data(rollouts, data_dir, noise_type): # pylint: disable=R0914 """ Generates data """ assert exists(data_dir), "The data directory does not exist..." env = gym.make("CarRacing-v0") seq_len = 1000 for i in range(rollouts): env.reset() env.env.viewer.window.dispatch_events() if noise_type == 'white': a_rollout = [env.action_space.sample() for _ in range(seq_len)] elif noise_type == 'brown': a_rollout = sample_continuous_policy(env.action_space, seq_len, 1. / 50) s_rollout = [] r_rollout = [] d_rollout = [] t = 0 while True: action = a_rollout[t] t += 1 # The CarRacing-v0 environment has a step limit of 1000, this can be seen in env.spec.max_episode_steps s, r, done, _ = env.step(action) env.env.viewer.window.dispatch_events() s_rollout += [s] r_rollout += [r] d_rollout += [done] if done: # Because these are random policies, most of them will not be done before the step limit of 1000 print("> End of rollout {}, {} frames...".format(i, len(s_rollout))) np.savez(join(data_dir, 'rollout_{}'.format(i)), observations=np.array(s_rollout), rewards=np.array(r_rollout), actions=np.array(a_rollout), terminals=np.array(d_rollout)) break if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--rollouts', type=int, help="Number of rollouts") parser.add_argument('--dir', type=str, help="Where to place rollouts") parser.add_argument('--policy', type=str, choices=['white', 'brown'], help='Noise type used for action sampling.', default='brown') args = parser.parse_args() generate_data(args.rollouts, args.dir, args.policy)
data/carracing.py
2,280
Generates data
Generating data from the CarRacing gym environment. !!! DOES NOT WORK ON TITANIC, DO IT AT HOME, THEN SCP !!!
pylint: disable=R0914
The CarRacing-v0 environment has a step limit of 1000, this can be seen in env.spec.max_episode_steps
Because these are random policies, most of them will not be done before the step limit of 1000
346
en
0.864667
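The script above imports sample_continuous_policy from utils.misc, which is not shown in this record. A plausible minimal sketch of such a helper is given below; it is an assumption based on the common convention of Brownian ("brown") action noise, not the actual contents of utils/misc.py.

import numpy as np

def sample_continuous_policy(action_space, seq_len, dt):
    # Assumed behaviour: integrate Gaussian steps of variance dt (Brownian noise),
    # clipping each action to the Box bounds of the action space.
    actions = [action_space.sample()]
    for _ in range(seq_len - 1):
        step = np.random.randn(*action_space.shape) * np.sqrt(dt)
        actions.append(np.clip(actions[-1] + step, action_space.low, action_space.high))
    return actions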
""" Entradas Monto de dinero -> int -> a """ a = int ( input ( "Ingrese monto de dinero en COP:" )) b = a billetes_de_100000 = ( b - b % 100000 ) / 100000 b = b % 100000 billetes_de_50000 = ( b - b % 50000 ) / 50000 b = b % 50000 billetes_de_20000 = ( b - b % 20000 ) / 20000 b = b % 20000 billetes_de_10000 = ( b - b % 10000 ) / 10000 b = b % 10000 billetes_de_5000 = ( b - b % 5000 ) / 5000 b = b % 5000 billetes_de_2000 = ( b - b % 2000 ) / 2000 b = b % 2000 billetes_de_1000 = ( b - b % 1000 ) / 1000 b = b % 1000 monedas_de_500 = ( b - b % 500 ) / 500 b = b % 500 monedas_de_200 = ( b - b % 200 ) / 200 b = b % 200 monedas_de_100 = ( b - b % 100 ) / 100 b = b % 100 monedas_de_50 = ( b - b % 50 ) / 50 b = b % 50 print ( "La Cantidad de billetes de 100000 es de:" + str ( billetes_de_100000 )) print ( "La Cantidad de billetes de 50000 es de:" + str ( billetes_de_50000 )) print ( "La Cantidad de billetes de 20000 es de:" + str ( billetes_de_20000 )) print ( "La Cantidad de billetes de 10000 es de:" + str ( billetes_de_10000 )) print ( "La Cantidad de billetes de 5000 es de:" + str ( billetes_de_5000 )) print ( "La Cantidad de billetes de 2000 es de:" + str ( billetes_de_2000 )) print ( "La Cantidad de billetes de 1000 es de:" + str ( billetes_de_1000 )) print ( "La Cantidad de monedas de 500 es de:" + str ( monedas_de_500 )) print ( "La Cantidad de monedas de 200 es de:" + str ( monedas_de_200 )) print ( "La Cantidad de monedas de 100 es de:" + str ( monedas_de_100 )) print ( "La Cantidad de monedas de 50 es de:" + str ( monedas_de_50 ))
taller_estructuras_de_control/ejercicio12.py
1,556
Inputs Amount of money -> int -> a
36
es
0.701548
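The change-making exercise above repeats the same quotient/remainder step for every denomination. A shorter, equivalent sketch using a loop and divmod is shown next; it is an illustrative rewrite, not part of the original workshop file, and keeps the same denominations and output labels.

a = int(input("Ingrese monto de dinero en COP:"))

# (label, value) pairs in descending order: bills ("billetes"), then coins ("monedas").
denominaciones = [
    ("billetes", 100000), ("billetes", 50000), ("billetes", 20000),
    ("billetes", 10000), ("billetes", 5000), ("billetes", 2000),
    ("billetes", 1000), ("monedas", 500), ("monedas", 200),
    ("monedas", 100), ("monedas", 50),
]

restante = a
for tipo, valor in denominaciones:
    cantidad, restante = divmod(restante, valor)  # whole count, then carry the remainder
    print("La Cantidad de " + tipo + " de " + str(valor) + " es de:" + str(cantidad))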
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests
import json
import datetime

url = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid=b10c5ed1-bad1-445f-b386-b919946339a7'
r = requests.get(url)
service_list = r.json()

lurls = []
lips = []

for service in service_list:
    for url in service.get('urls', []):
        lurls.append(url.replace('*', ''))
    for ip in service.get('ips', []):
        lips.append(ip)

warninglist = {}
warninglist['name'] = 'List of known Office 365 URLs address ranges'
warninglist['version'] = int(datetime.date.today().strftime('%Y%m%d'))
warninglist['description'] = 'Office 365 URLs and IP address ranges'
warninglist['type'] = 'string'
warninglist['list'] = sorted(set(lurls))
warninglist['matching_attributes'] = ["domain", "domain|ip", "hostname"]

with open('../lists/microsoft-office365/list.json', 'w') as data_file:
    json.dump(warninglist, data_file, indent=4, sort_keys=True)

warninglist = {}
warninglist['name'] = 'List of known Office 365 IP address ranges'
warninglist['version'] = int(datetime.date.today().strftime('%Y%m%d'))
warninglist['description'] = 'Office 365 URLs and IP address ranges'
warninglist['list'] = sorted(set(lips))
warninglist['type'] = 'cidr'
warninglist['matching_attributes'] = ["ip-src", "ip-dst", "domain|ip"]

with open('../lists/microsoft-office365-ip/list.json', 'w') as data_file:
    json.dump(warninglist, data_file, indent=4, sort_keys=True)
tools/generate-office365.py
1,458
!/usr/bin/env python3 -*- coding: utf-8 -*-
43
fr
0.304089
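The generator above writes two warninglist JSON files with an identical layout. A minimal sketch of factoring that shared write step into a helper is shown below; write_warninglist is an illustrative name and not part of the original tool.

import json
import datetime

def write_warninglist(path, name, values, list_type, matching_attributes):
    # Shared layout used for both the URL and the IP warninglists above.
    warninglist = {
        'name': name,
        'version': int(datetime.date.today().strftime('%Y%m%d')),
        'description': 'Office 365 URLs and IP address ranges',
        'type': list_type,
        'list': sorted(set(values)),
        'matching_attributes': matching_attributes,
    }
    with open(path, 'w') as data_file:
        json.dump(warninglist, data_file, indent=4, sort_keys=True)

Usage would then be, for example, write_warninglist('../lists/microsoft-office365/list.json', 'List of known Office 365 URLs address ranges', lurls, 'string', ["domain", "domain|ip", "hostname"]), and similarly for the IP list.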
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = [ 'AcceleratorConfigArgs', 'AccessConfigArgs', 'AdvancedMachineFeaturesArgs', 'AliasIpRangeArgs', 'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs', 'AllocationSpecificSKUAllocationReservedInstancePropertiesArgs', 'AllocationSpecificSKUReservationArgs', 'AttachedDiskInitializeParamsArgs', 'AttachedDiskArgs', 'AuditConfigArgs', 'AuditLogConfigArgs', 'AuthorizationLoggingOptionsArgs', 'AutoscalingPolicyCpuUtilizationArgs', 'AutoscalingPolicyCustomMetricUtilizationArgs', 'AutoscalingPolicyLoadBalancingUtilizationArgs', 'AutoscalingPolicyScaleDownControlArgs', 'AutoscalingPolicyScaleInControlArgs', 'AutoscalingPolicyArgs', 'BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs', 'BackendBucketCdnPolicyCacheKeyPolicyArgs', 'BackendBucketCdnPolicyNegativeCachingPolicyArgs', 'BackendBucketCdnPolicyArgs', 'BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs', 'BackendServiceCdnPolicyNegativeCachingPolicyArgs', 'BackendServiceCdnPolicyArgs', 'BackendServiceConnectionTrackingPolicyArgs', 'BackendServiceFailoverPolicyArgs', 'BackendServiceIAPOAuth2ClientInfoArgs', 'BackendServiceIAPArgs', 'BackendServiceLogConfigArgs', 'BackendArgs', 'BindingArgs', 'CacheKeyPolicyArgs', 'CallCredentialsArgs', 'ChannelCredentialsArgs', 'CircuitBreakersArgs', 'ConditionArgs', 'ConfidentialInstanceConfigArgs', 'ConnectionDrainingArgs', 'ConsistentHashLoadBalancerSettingsHttpCookieArgs', 'ConsistentHashLoadBalancerSettingsArgs', 'CorsPolicyArgs', 'CustomerEncryptionKeyArgs', 'DeprecationStatusArgs', 'DiskInstantiationConfigArgs', 'DisplayDeviceArgs', 'DistributionPolicyZoneConfigurationArgs', 'DistributionPolicyArgs', 'DurationArgs', 'ExprArgs', 'ExternalVpnGatewayInterfaceArgs', 'FileContentBufferArgs', 'FirewallAllowedItemArgs', 'FirewallDeniedItemArgs', 'FirewallLogConfigArgs', 'FirewallPolicyAssociationArgs', 'FirewallPolicyRuleMatcherLayer4ConfigArgs', 'FirewallPolicyRuleMatcherArgs', 'FirewallPolicyRuleSecureTagArgs', 'FirewallPolicyRuleArgs', 'FixedOrPercentArgs', 'ForwardingRuleServiceDirectoryRegistrationArgs', 'FutureReservationSpecificSKUPropertiesArgs', 'FutureReservationTimeWindowArgs', 'GRPCHealthCheckArgs', 'GrpcServiceConfigArgs', 'GuestOsFeatureArgs', 'HTTP2HealthCheckArgs', 'HTTPHealthCheckArgs', 'HTTPSHealthCheckArgs', 'HealthCheckLogConfigArgs', 'HostRuleArgs', 'HttpFaultAbortArgs', 'HttpFaultDelayArgs', 'HttpFaultInjectionArgs', 'HttpFilterConfigArgs', 'HttpHeaderActionArgs', 'HttpHeaderMatchArgs', 'HttpHeaderOptionArgs', 'HttpQueryParameterMatchArgs', 'HttpRedirectActionArgs', 'HttpRetryPolicyArgs', 'HttpRouteActionArgs', 'HttpRouteRuleMatchArgs', 'HttpRouteRuleArgs', 'ImageRawDiskArgs', 'InitialStateConfigArgs', 'InstanceGroupManagerAllInstancesConfigArgs', 'InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs', 'InstanceGroupManagerAutoHealingPolicyArgs', 'InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs', 'InstanceGroupManagerInstanceLifecyclePolicyArgs', 'InstanceGroupManagerStandbyPolicyArgs', 'InstanceGroupManagerUpdatePolicyArgs', 'InstanceGroupManagerVersionArgs', 'InstanceParamsArgs', 'InstancePropertiesPatchArgs', 'InstancePropertiesArgs', 'Int64RangeMatchArgs', 
'InterconnectAttachmentPartnerMetadataArgs', 'InterconnectMacsecPreSharedKeyArgs', 'InterconnectMacsecArgs', 'LicenseResourceCommitmentArgs', 'LicenseResourceRequirementsArgs', 'LocalDiskArgs', 'LogConfigCloudAuditOptionsArgs', 'LogConfigCounterOptionsCustomFieldArgs', 'LogConfigCounterOptionsArgs', 'LogConfigDataAccessOptionsArgs', 'LogConfigArgs', 'MetadataCredentialsFromPluginArgs', 'MetadataFilterLabelMatchArgs', 'MetadataFilterArgs', 'MetadataItemsItemArgs', 'MetadataArgs', 'NamedPortArgs', 'NetworkEndpointGroupAppEngineArgs', 'NetworkEndpointGroupCloudFunctionArgs', 'NetworkEndpointGroupCloudRunArgs', 'NetworkEndpointGroupServerlessDeploymentArgs', 'NetworkInterfaceSubInterfaceArgs', 'NetworkInterfaceArgs', 'NetworkPerformanceConfigArgs', 'NetworkRoutingConfigArgs', 'NodeGroupAutoscalingPolicyArgs', 'NodeGroupMaintenanceWindowArgs', 'NodeTemplateNodeTypeFlexibilityArgs', 'NotificationEndpointGrpcSettingsArgs', 'OutlierDetectionArgs', 'PacketMirroringFilterArgs', 'PacketMirroringForwardingRuleInfoArgs', 'PacketMirroringMirroredResourceInfoInstanceInfoArgs', 'PacketMirroringMirroredResourceInfoSubnetInfoArgs', 'PacketMirroringMirroredResourceInfoArgs', 'PacketMirroringNetworkInfoArgs', 'PathMatcherArgs', 'PathRuleArgs', 'PublicDelegatedPrefixPublicDelegatedSubPrefixArgs', 'RequestMirrorPolicyArgs', 'ReservationAffinityArgs', 'ReservationArgs', 'ResourceCommitmentArgs', 'ResourcePolicyDailyCycleArgs', 'ResourcePolicyGroupPlacementPolicyArgs', 'ResourcePolicyHourlyCycleArgs', 'ResourcePolicyInstanceSchedulePolicyScheduleArgs', 'ResourcePolicyInstanceSchedulePolicyArgs', 'ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs', 'ResourcePolicySnapshotSchedulePolicyScheduleArgs', 'ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs', 'ResourcePolicySnapshotSchedulePolicyArgs', 'ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs', 'ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs', 'ResourcePolicyVmMaintenancePolicyArgs', 'ResourcePolicyWeeklyCycleDayOfWeekArgs', 'ResourcePolicyWeeklyCycleArgs', 'RolloutPolicyArgs', 'RouterAdvertisedIpRangeArgs', 'RouterBgpPeerBfdArgs', 'RouterBgpPeerArgs', 'RouterBgpArgs', 'RouterInterfaceArgs', 'RouterNatLogConfigArgs', 'RouterNatRuleActionArgs', 'RouterNatRuleArgs', 'RouterNatSubnetworkToNatArgs', 'RouterNatArgs', 'RuleArgs', 'SSLHealthCheckArgs', 'SavedDiskArgs', 'SchedulingNodeAffinityArgs', 'SchedulingArgs', 'SdsConfigArgs', 'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs', 'SecurityPolicyAdaptiveProtectionConfigArgs', 'SecurityPolicyAdvancedOptionsConfigArgs', 'SecurityPolicyAssociationArgs', 'SecurityPolicyCloudArmorConfigArgs', 'SecurityPolicyDdosProtectionConfigArgs', 'SecurityPolicyRecaptchaOptionsConfigArgs', 'SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs', 'SecurityPolicyRuleHttpHeaderActionArgs', 'SecurityPolicyRuleMatcherConfigDestinationPortArgs', 'SecurityPolicyRuleMatcherConfigLayer4ConfigArgs', 'SecurityPolicyRuleMatcherConfigArgs', 'SecurityPolicyRuleMatcherArgs', 'SecurityPolicyRuleRateLimitOptionsThresholdArgs', 'SecurityPolicyRuleRateLimitOptionsArgs', 'SecurityPolicyRuleRedirectOptionsArgs', 'SecurityPolicyRuleArgs', 'SecuritySettingsArgs', 'ServerBindingArgs', 'ServerTlsSettingsArgs', 'ServiceAccountArgs', 'ServiceAttachmentConsumerProjectLimitArgs', 'ShareSettingsArgs', 'ShieldedInstanceConfigArgs', 'ShieldedInstanceIntegrityPolicyArgs', 'ShieldedVmConfigArgs', 'ShieldedVmIntegrityPolicyArgs', 'SourceDiskEncryptionKeyArgs', 'SourceInstanceParamsArgs', 
'SslCertificateManagedSslCertificateArgs', 'SslCertificateSelfManagedSslCertificateArgs', 'StatefulPolicyPreservedStateArgs', 'StatefulPolicyArgs', 'SubnetworkLogConfigArgs', 'SubnetworkSecondaryRangeArgs', 'SubsettingArgs', 'TCPHealthCheckArgs', 'TagsArgs', 'TlsCertificateContextArgs', 'TlsCertificatePathsArgs', 'TlsContextArgs', 'TlsValidationContextArgs', 'UDPHealthCheckArgs', 'UrlMapTestHeaderArgs', 'UrlMapTestArgs', 'UrlRewriteArgs', 'VpnGatewayVpnGatewayInterfaceArgs', 'WeightedBackendServiceArgs', ] @pulumi.input_type class AcceleratorConfigArgs: def __init__(__self__, *, accelerator_count: Optional[pulumi.Input[int]] = None, accelerator_type: Optional[pulumi.Input[str]] = None): """ A specification of the type and number of accelerator cards attached to the instance. :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance. :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. """ if accelerator_count is not None: pulumi.set(__self__, "accelerator_count", accelerator_count) if accelerator_type is not None: pulumi.set(__self__, "accelerator_type", accelerator_type) @property @pulumi.getter(name="acceleratorCount") def accelerator_count(self) -> Optional[pulumi.Input[int]]: """ The number of the guest accelerator cards exposed to this instance. """ return pulumi.get(self, "accelerator_count") @accelerator_count.setter def accelerator_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "accelerator_count", value) @property @pulumi.getter(name="acceleratorType") def accelerator_type(self) -> Optional[pulumi.Input[str]]: """ Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. """ return pulumi.get(self, "accelerator_type") @accelerator_type.setter def accelerator_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "accelerator_type", value) @pulumi.input_type class AccessConfigArgs: def __init__(__self__, *, external_ipv6: Optional[pulumi.Input[str]] = None, external_ipv6_prefix_length: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, nat_ip: Optional[pulumi.Input[str]] = None, network_tier: Optional[pulumi.Input['AccessConfigNetworkTier']] = None, public_ptr_domain_name: Optional[pulumi.Input[str]] = None, set_public_dns: Optional[pulumi.Input[bool]] = None, set_public_ptr: Optional[pulumi.Input[bool]] = None, type: Optional[pulumi.Input['AccessConfigType']] = None): """ An access configuration attached to an instance's network interface. Only one access config per instance is supported. :param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. :param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range. 
:param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. :param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. :param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. :param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. :param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. :param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. :param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT. """ if external_ipv6 is not None: pulumi.set(__self__, "external_ipv6", external_ipv6) if external_ipv6_prefix_length is not None: pulumi.set(__self__, "external_ipv6_prefix_length", external_ipv6_prefix_length) if name is not None: pulumi.set(__self__, "name", name) if nat_ip is not None: pulumi.set(__self__, "nat_ip", nat_ip) if network_tier is not None: pulumi.set(__self__, "network_tier", network_tier) if public_ptr_domain_name is not None: pulumi.set(__self__, "public_ptr_domain_name", public_ptr_domain_name) if set_public_dns is not None: pulumi.set(__self__, "set_public_dns", set_public_dns) if set_public_ptr is not None: pulumi.set(__self__, "set_public_ptr", set_public_ptr) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="externalIpv6") def external_ipv6(self) -> Optional[pulumi.Input[str]]: """ The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. """ return pulumi.get(self, "external_ipv6") @external_ipv6.setter def external_ipv6(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "external_ipv6", value) @property @pulumi.getter(name="externalIpv6PrefixLength") def external_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The prefix length of the external IPv6 range. 
""" return pulumi.get(self, "external_ipv6_prefix_length") @external_ipv6_prefix_length.setter def external_ipv6_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "external_ipv6_prefix_length", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="natIP") def nat_ip(self) -> Optional[pulumi.Input[str]]: """ An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. """ return pulumi.get(self, "nat_ip") @nat_ip.setter def nat_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "nat_ip", value) @property @pulumi.getter(name="networkTier") def network_tier(self) -> Optional[pulumi.Input['AccessConfigNetworkTier']]: """ This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. """ return pulumi.get(self, "network_tier") @network_tier.setter def network_tier(self, value: Optional[pulumi.Input['AccessConfigNetworkTier']]): pulumi.set(self, "network_tier", value) @property @pulumi.getter(name="publicPtrDomainName") def public_ptr_domain_name(self) -> Optional[pulumi.Input[str]]: """ The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. """ return pulumi.get(self, "public_ptr_domain_name") @public_ptr_domain_name.setter def public_ptr_domain_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "public_ptr_domain_name", value) @property @pulumi.getter(name="setPublicDns") def set_public_dns(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. """ return pulumi.get(self, "set_public_dns") @set_public_dns.setter def set_public_dns(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "set_public_dns", value) @property @pulumi.getter(name="setPublicPtr") def set_public_ptr(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. """ return pulumi.get(self, "set_public_ptr") @set_public_ptr.setter def set_public_ptr(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "set_public_ptr", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['AccessConfigType']]: """ The type of configuration. 
The default and only option is ONE_TO_ONE_NAT. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['AccessConfigType']]): pulumi.set(self, "type", value) @pulumi.input_type class AdvancedMachineFeaturesArgs: def __init__(__self__, *, enable_nested_virtualization: Optional[pulumi.Input[bool]] = None, enable_uefi_networking: Optional[pulumi.Input[bool]] = None, numa_node_count: Optional[pulumi.Input[int]] = None, threads_per_core: Optional[pulumi.Input[int]] = None, visible_core_count: Optional[pulumi.Input[int]] = None): """ Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled). :param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false). :param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation. :param pulumi.Input[int] numa_node_count: The number of vNUMA nodes. :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. :param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. """ if enable_nested_virtualization is not None: pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization) if enable_uefi_networking is not None: pulumi.set(__self__, "enable_uefi_networking", enable_uefi_networking) if numa_node_count is not None: pulumi.set(__self__, "numa_node_count", numa_node_count) if threads_per_core is not None: pulumi.set(__self__, "threads_per_core", threads_per_core) if visible_core_count is not None: pulumi.set(__self__, "visible_core_count", visible_core_count) @property @pulumi.getter(name="enableNestedVirtualization") def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable nested virtualization or not (default is false). """ return pulumi.get(self, "enable_nested_virtualization") @enable_nested_virtualization.setter def enable_nested_virtualization(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_nested_virtualization", value) @property @pulumi.getter(name="enableUefiNetworking") def enable_uefi_networking(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable UEFI networking for instance creation. """ return pulumi.get(self, "enable_uefi_networking") @enable_uefi_networking.setter def enable_uefi_networking(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_uefi_networking", value) @property @pulumi.getter(name="numaNodeCount") def numa_node_count(self) -> Optional[pulumi.Input[int]]: """ The number of vNUMA nodes. 
""" return pulumi.get(self, "numa_node_count") @numa_node_count.setter def numa_node_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "numa_node_count", value) @property @pulumi.getter(name="threadsPerCore") def threads_per_core(self) -> Optional[pulumi.Input[int]]: """ The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. """ return pulumi.get(self, "threads_per_core") @threads_per_core.setter def threads_per_core(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "threads_per_core", value) @property @pulumi.getter(name="visibleCoreCount") def visible_core_count(self) -> Optional[pulumi.Input[int]]: """ The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. """ return pulumi.get(self, "visible_core_count") @visible_core_count.setter def visible_core_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "visible_core_count", value) @pulumi.input_type class AliasIpRangeArgs: def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]] = None, subnetwork_range_name: Optional[pulumi.Input[str]] = None): """ An alias IP range attached to an instance's network interface. :param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). :param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. """ if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if subnetwork_range_name is not None: pulumi.set(__self__, "subnetwork_range_name", subnetwork_range_name) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). """ return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="subnetworkRangeName") def subnetwork_range_name(self) -> Optional[pulumi.Input[str]]: """ The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. 
""" return pulumi.get(self, "subnetwork_range_name") @subnetwork_range_name.setter def subnetwork_range_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork_range_name", value) @pulumi.input_type class AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs: def __init__(__self__, *, disk_size_gb: Optional[pulumi.Input[str]] = None, interface: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']] = None): """ :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if interface is not None: pulumi.set(__self__, "interface", interface) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ Specifies the size of the disk in base-2 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter def interface(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]: """ Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ return pulumi.get(self, "interface") @interface.setter def interface(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]): pulumi.set(self, "interface", value) @pulumi.input_type class AllocationSpecificSKUAllocationReservedInstancePropertiesArgs: def __init__(__self__, *, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]] = None, local_ssds: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]] = None, location_hint: Optional[pulumi.Input[str]] = None, machine_type: Optional[pulumi.Input[str]] = None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]] = None, maintenance_interval: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']] = None, min_cpu_platform: Optional[pulumi.Input[str]] = None): """ Properties of the SKU instances being reserved. Next ID: 9 :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count. :param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. :param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. 
:param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals. :param pulumi.Input[str] min_cpu_platform: Minimum cpu platform the reservation. """ if guest_accelerators is not None: pulumi.set(__self__, "guest_accelerators", guest_accelerators) if local_ssds is not None: pulumi.set(__self__, "local_ssds", local_ssds) if location_hint is not None: pulumi.set(__self__, "location_hint", location_hint) if machine_type is not None: pulumi.set(__self__, "machine_type", machine_type) if maintenance_freeze_duration_hours is not None: pulumi.set(__self__, "maintenance_freeze_duration_hours", maintenance_freeze_duration_hours) if maintenance_interval is not None: pulumi.set(__self__, "maintenance_interval", maintenance_interval) if min_cpu_platform is not None: pulumi.set(__self__, "min_cpu_platform", min_cpu_platform) @property @pulumi.getter(name="guestAccelerators") def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]: """ Specifies accelerator type and count. """ return pulumi.get(self, "guest_accelerators") @guest_accelerators.setter def guest_accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]): pulumi.set(self, "guest_accelerators", value) @property @pulumi.getter(name="localSsds") def local_ssds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]: """ Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. """ return pulumi.get(self, "local_ssds") @local_ssds.setter def local_ssds(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]): pulumi.set(self, "local_ssds", value) @property @pulumi.getter(name="locationHint") def location_hint(self) -> Optional[pulumi.Input[str]]: """ An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. """ return pulumi.get(self, "location_hint") @location_hint.setter def location_hint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location_hint", value) @property @pulumi.getter(name="machineType") def machine_type(self) -> Optional[pulumi.Input[str]]: """ Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. """ return pulumi.get(self, "machine_type") @machine_type.setter def machine_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "machine_type", value) @property @pulumi.getter(name="maintenanceFreezeDurationHours") def maintenance_freeze_duration_hours(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. 
""" return pulumi.get(self, "maintenance_freeze_duration_hours") @maintenance_freeze_duration_hours.setter def maintenance_freeze_duration_hours(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "maintenance_freeze_duration_hours", value) @property @pulumi.getter(name="maintenanceInterval") def maintenance_interval(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]: """ For more information about maintenance intervals, see Setting maintenance intervals. """ return pulumi.get(self, "maintenance_interval") @maintenance_interval.setter def maintenance_interval(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]): pulumi.set(self, "maintenance_interval", value) @property @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[pulumi.Input[str]]: """ Minimum cpu platform the reservation. """ return pulumi.get(self, "min_cpu_platform") @min_cpu_platform.setter def min_cpu_platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "min_cpu_platform", value) @pulumi.input_type class AllocationSpecificSKUReservationArgs: def __init__(__self__, *, count: Optional[pulumi.Input[str]] = None, instance_properties: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']] = None): """ This reservation type allows to pre allocate specific instance configuration. Next ID: 5 :param pulumi.Input[str] count: Specifies the number of resources that are allocated. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: The instance properties for the reservation. """ if count is not None: pulumi.set(__self__, "count", count) if instance_properties is not None: pulumi.set(__self__, "instance_properties", instance_properties) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[str]]: """ Specifies the number of resources that are allocated. """ return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "count", value) @property @pulumi.getter(name="instanceProperties") def instance_properties(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]: """ The instance properties for the reservation. 
""" return pulumi.get(self, "instance_properties") @instance_properties.setter def instance_properties(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]): pulumi.set(self, "instance_properties", value) @pulumi.input_type class AttachedDiskInitializeParamsArgs: def __init__(__self__, *, architecture: Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']] = None, description: Optional[pulumi.Input[str]] = None, disk_name: Optional[pulumi.Input[str]] = None, disk_size_gb: Optional[pulumi.Input[str]] = None, disk_type: Optional[pulumi.Input[str]] = None, guest_os_features: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, license_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, licenses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, multi_writer: Optional[pulumi.Input[bool]] = None, on_update_action: Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']] = None, provisioned_iops: Optional[pulumi.Input[str]] = None, replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, resource_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_image: Optional[pulumi.Input[str]] = None, source_image_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, source_instant_snapshot: Optional[pulumi.Input[str]] = None, source_snapshot: Optional[pulumi.Input[str]] = None, source_snapshot_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None): """ [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInitializeParamsArchitecture'] architecture: The architecture of the attached disk. Valid values are arm64 or x86_64. :param pulumi.Input[str] description: An optional description. Provide this property when creating the disk. :param pulumi.Input[str] disk_name: Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. :param pulumi.Input[str] disk_type: Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. 
:param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. :param pulumi.Input[Sequence[pulumi.Input[str]]] license_codes: Integer license codes indicating which licenses are attached to this disk. :param pulumi.Input[Sequence[pulumi.Input[str]]] licenses: A list of publicly visible licenses. Reserved for Google's use. :param pulumi.Input[bool] multi_writer: Indicates whether or not the disk can be read/write attached to more than one instance. :param pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction'] on_update_action: Specifies which action to take on instance update with this disk. Default is to use the existing disk. :param pulumi.Input[str] provisioned_iops: Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to. Only applicable for regional resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. :param pulumi.Input[str] source_image: The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_image_encryption_key: The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. :param pulumi.Input[str] source_instant_snapshot: The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceInstantSnapshot initializeParams.sourceImage or disks.source is required except for local SSD. 
To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. :param pulumi.Input[str] source_snapshot: The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. """ if architecture is not None: pulumi.set(__self__, "architecture", architecture) if description is not None: pulumi.set(__self__, "description", description) if disk_name is not None: pulumi.set(__self__, "disk_name", disk_name) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if disk_type is not None: pulumi.set(__self__, "disk_type", disk_type) if guest_os_features is not None: pulumi.set(__self__, "guest_os_features", guest_os_features) if labels is not None: pulumi.set(__self__, "labels", labels) if license_codes is not None: pulumi.set(__self__, "license_codes", license_codes) if licenses is not None: pulumi.set(__self__, "licenses", licenses) if multi_writer is not None: pulumi.set(__self__, "multi_writer", multi_writer) if on_update_action is not None: pulumi.set(__self__, "on_update_action", on_update_action) if provisioned_iops is not None: pulumi.set(__self__, "provisioned_iops", provisioned_iops) if replica_zones is not None: pulumi.set(__self__, "replica_zones", replica_zones) if resource_policies is not None: pulumi.set(__self__, "resource_policies", resource_policies) if source_image is not None: pulumi.set(__self__, "source_image", source_image) if source_image_encryption_key is not None: pulumi.set(__self__, "source_image_encryption_key", source_image_encryption_key) if source_instant_snapshot is not None: pulumi.set(__self__, "source_instant_snapshot", source_instant_snapshot) if source_snapshot is not None: pulumi.set(__self__, "source_snapshot", source_snapshot) if source_snapshot_encryption_key is not None: pulumi.set(__self__, "source_snapshot_encryption_key", source_snapshot_encryption_key) @property @pulumi.getter def architecture(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']]: """ The architecture of the attached disk. Valid values are arm64 or x86_64. """ return pulumi.get(self, "architecture") @architecture.setter def architecture(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsArchitecture']]): pulumi.set(self, "architecture", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description. Provide this property when creating the disk. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="diskName") def disk_name(self) -> Optional[pulumi.Input[str]]: """ Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. 
""" return pulumi.get(self, "disk_name") @disk_name.setter def disk_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_name", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="diskType") def disk_type(self) -> Optional[pulumi.Input[str]]: """ Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. """ return pulumi.get(self, "disk_type") @disk_type.setter def disk_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_type", value) @property @pulumi.getter(name="guestOsFeatures") def guest_os_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]: """ A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures """ return pulumi.get(self, "guest_os_features") @guest_os_features.setter def guest_os_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]): pulumi.set(self, "guest_os_features", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="licenseCodes") def license_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Integer license codes indicating which licenses are attached to this disk. """ return pulumi.get(self, "license_codes") @license_codes.setter def license_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "license_codes", value) @property @pulumi.getter def licenses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of publicly visible licenses. Reserved for Google's use. 
""" return pulumi.get(self, "licenses") @licenses.setter def licenses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "licenses", value) @property @pulumi.getter(name="multiWriter") def multi_writer(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not the disk can be read/write attached to more than one instance. """ return pulumi.get(self, "multi_writer") @multi_writer.setter def multi_writer(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "multi_writer", value) @property @pulumi.getter(name="onUpdateAction") def on_update_action(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']]: """ Specifies which action to take on instance update with this disk. Default is to use the existing disk. """ return pulumi.get(self, "on_update_action") @on_update_action.setter def on_update_action(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction']]): pulumi.set(self, "on_update_action", value) @property @pulumi.getter(name="provisionedIops") def provisioned_iops(self) -> Optional[pulumi.Input[str]]: """ Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. """ return pulumi.get(self, "provisioned_iops") @provisioned_iops.setter def provisioned_iops(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "provisioned_iops", value) @property @pulumi.getter(name="replicaZones") def replica_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ URLs of the zones where the disk should be replicated to. Only applicable for regional resources. """ return pulumi.get(self, "replica_zones") @replica_zones.setter def replica_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "replica_zones", value) @property @pulumi.getter(name="resourcePolicies") def resource_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. """ return pulumi.get(self, "resource_policies") @resource_policies.setter def resource_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "resource_policies", value) @property @pulumi.getter(name="sourceImage") def source_image(self) -> Optional[pulumi.Input[str]]: """ The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. 
""" return pulumi.get(self, "source_image") @source_image.setter def source_image(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_image", value) @property @pulumi.getter(name="sourceImageEncryptionKey") def source_image_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. """ return pulumi.get(self, "source_image_encryption_key") @source_image_encryption_key.setter def source_image_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "source_image_encryption_key", value) @property @pulumi.getter(name="sourceInstantSnapshot") def source_instant_snapshot(self) -> Optional[pulumi.Input[str]]: """ The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceInstantSnapshot initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. """ return pulumi.get(self, "source_instant_snapshot") @source_instant_snapshot.setter def source_instant_snapshot(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_instant_snapshot", value) @property @pulumi.getter(name="sourceSnapshot") def source_snapshot(self) -> Optional[pulumi.Input[str]]: """ The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. """ return pulumi.get(self, "source_snapshot") @source_snapshot.setter def source_snapshot(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_snapshot", value) @property @pulumi.getter(name="sourceSnapshotEncryptionKey") def source_snapshot_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source snapshot. 
""" return pulumi.get(self, "source_snapshot_encryption_key") @source_snapshot_encryption_key.setter def source_snapshot_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "source_snapshot_encryption_key", value) @pulumi.input_type class AttachedDiskArgs: def __init__(__self__, *, auto_delete: Optional[pulumi.Input[bool]] = None, boot: Optional[pulumi.Input[bool]] = None, device_name: Optional[pulumi.Input[str]] = None, disk_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, disk_size_gb: Optional[pulumi.Input[str]] = None, force_attach: Optional[pulumi.Input[bool]] = None, guest_os_features: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]] = None, initialize_params: Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']] = None, interface: Optional[pulumi.Input['AttachedDiskInterface']] = None, mode: Optional[pulumi.Input['AttachedDiskMode']] = None, saved_state: Optional[pulumi.Input['AttachedDiskSavedState']] = None, source: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input['AttachedDiskType']] = None): """ An instance-attached disk resource. :param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[bool] boot: Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. :param pulumi.Input[str] device_name: Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. :param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. :param pulumi.Input[str] disk_size_gb: The size of the disk in GB. :param pulumi.Input[bool] force_attach: [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. :param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. 
Read Enabling guest operating system features to see a list of available options. :param pulumi.Input['AttachedDiskInitializeParamsArgs'] initialize_params: [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. :param pulumi.Input['AttachedDiskMode'] mode: The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. :param pulumi.Input['AttachedDiskSavedState'] saved_state: For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api. :param pulumi.Input[str] source: Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. :param pulumi.Input['AttachedDiskType'] type: Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. """ if auto_delete is not None: pulumi.set(__self__, "auto_delete", auto_delete) if boot is not None: pulumi.set(__self__, "boot", boot) if device_name is not None: pulumi.set(__self__, "device_name", device_name) if disk_encryption_key is not None: pulumi.set(__self__, "disk_encryption_key", disk_encryption_key) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if force_attach is not None: pulumi.set(__self__, "force_attach", force_attach) if guest_os_features is not None: pulumi.set(__self__, "guest_os_features", guest_os_features) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) if interface is not None: pulumi.set(__self__, "interface", interface) if mode is not None: pulumi.set(__self__, "mode", mode) if saved_state is not None: pulumi.set(__self__, "saved_state", saved_state) if source is not None: pulumi.set(__self__, "source", source) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="autoDelete") def auto_delete(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). """ return pulumi.get(self, "auto_delete") @auto_delete.setter def auto_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_delete", value) @property @pulumi.getter def boot(self) -> Optional[pulumi.Input[bool]]: """ Indicates that this is a boot disk. 
The virtual machine will use the first partition of the disk for its root filesystem. """ return pulumi.get(self, "boot") @boot.setter def boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "boot", value) @property @pulumi.getter(name="deviceName") def device_name(self) -> Optional[pulumi.Input[str]]: """ Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. """ return pulumi.get(self, "device_name") @device_name.setter def device_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "device_name", value) @property @pulumi.getter(name="diskEncryptionKey") def disk_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. """ return pulumi.get(self, "disk_encryption_key") @disk_encryption_key.setter def disk_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "disk_encryption_key", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[str]]: """ The size of the disk in GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="forceAttach") def force_attach(self) -> Optional[pulumi.Input[bool]]: """ [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. """ return pulumi.get(self, "force_attach") @force_attach.setter def force_attach(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_attach", value) @property @pulumi.getter(name="guestOsFeatures") def guest_os_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]: """ A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. 
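
        An illustrative sketch only: enabling a guest OS feature on this disk. The type field and the MULTI_IP_SUBNET value are assumptions taken from the Compute Engine guest OS features documentation, not from this file; the value is passed here as its string form.

            features = [GuestOsFeatureArgs(type="MULTI_IP_SUBNET")]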
""" return pulumi.get(self, "guest_os_features") @guest_os_features.setter def guest_os_features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]]]): pulumi.set(self, "guest_os_features", value) @property @pulumi.getter(name="initializeParams") def initialize_params(self) -> Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']]: """ [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. """ return pulumi.get(self, "initialize_params") @initialize_params.setter def initialize_params(self, value: Optional[pulumi.Input['AttachedDiskInitializeParamsArgs']]): pulumi.set(self, "initialize_params", value) @property @pulumi.getter def interface(self) -> Optional[pulumi.Input['AttachedDiskInterface']]: """ Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. """ return pulumi.get(self, "interface") @interface.setter def interface(self, value: Optional[pulumi.Input['AttachedDiskInterface']]): pulumi.set(self, "interface", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['AttachedDiskMode']]: """ The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['AttachedDiskMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter(name="savedState") def saved_state(self) -> Optional[pulumi.Input['AttachedDiskSavedState']]: """ For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api. """ return pulumi.get(self, "saved_state") @saved_state.setter def saved_state(self, value: Optional[pulumi.Input['AttachedDiskSavedState']]): pulumi.set(self, "saved_state", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input[str]]: """ Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['AttachedDiskType']]: """ Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['AttachedDiskType']]): pulumi.set(self, "type", value) @pulumi.input_type class AuditConfigArgs: def __init__(__self__, *, audit_log_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]] = None, exempted_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, service: Optional[pulumi.Input[str]] = None): """ Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging. :param pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]] audit_log_configs: The configuration for logging of each type of permission. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] service: Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. """ if audit_log_configs is not None: pulumi.set(__self__, "audit_log_configs", audit_log_configs) if exempted_members is not None: pulumi.set(__self__, "exempted_members", exempted_members) if service is not None: pulumi.set(__self__, "service", service) @property @pulumi.getter(name="auditLogConfigs") def audit_log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]: """ The configuration for logging of each type of permission. """ return pulumi.get(self, "audit_log_configs") @audit_log_configs.setter def audit_log_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]]]): pulumi.set(self, "audit_log_configs", value) @property @pulumi.getter(name="exemptedMembers") def exempted_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "exempted_members") @exempted_members.setter def exempted_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "exempted_members", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. 
""" return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @pulumi.input_type class AuditLogConfigArgs: def __init__(__self__, *, exempted_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ignore_child_exemptions: Optional[pulumi.Input[bool]] = None, log_type: Optional[pulumi.Input['AuditLogConfigLogType']] = None): """ Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. :param pulumi.Input[bool] ignore_child_exemptions: This is deprecated and has no effect. Do not use. :param pulumi.Input['AuditLogConfigLogType'] log_type: The log type that this config enables. """ if exempted_members is not None: pulumi.set(__self__, "exempted_members", exempted_members) if ignore_child_exemptions is not None: pulumi.set(__self__, "ignore_child_exemptions", ignore_child_exemptions) if log_type is not None: pulumi.set(__self__, "log_type", log_type) @property @pulumi.getter(name="exemptedMembers") def exempted_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. """ return pulumi.get(self, "exempted_members") @exempted_members.setter def exempted_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "exempted_members", value) @property @pulumi.getter(name="ignoreChildExemptions") def ignore_child_exemptions(self) -> Optional[pulumi.Input[bool]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "ignore_child_exemptions") @ignore_child_exemptions.setter def ignore_child_exemptions(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_child_exemptions", value) @property @pulumi.getter(name="logType") def log_type(self) -> Optional[pulumi.Input['AuditLogConfigLogType']]: """ The log type that this config enables. """ return pulumi.get(self, "log_type") @log_type.setter def log_type(self, value: Optional[pulumi.Input['AuditLogConfigLogType']]): pulumi.set(self, "log_type", value) @pulumi.input_type class AuthorizationLoggingOptionsArgs: def __init__(__self__, *, permission_type: Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsPermissionType'] permission_type: This is deprecated and has no effect. Do not use. """ if permission_type is not None: pulumi.set(__self__, "permission_type", permission_type) @property @pulumi.getter(name="permissionType") def permission_type(self) -> Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "permission_type") @permission_type.setter def permission_type(self, value: Optional[pulumi.Input['AuthorizationLoggingOptionsPermissionType']]): pulumi.set(self, "permission_type", value) @pulumi.input_type class AutoscalingPolicyCpuUtilizationArgs: def __init__(__self__, *, predictive_method: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']] = None, utilization_target: Optional[pulumi.Input[float]] = None): """ CPU utilization policy. :param pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod'] predictive_method: Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. :param pulumi.Input[float] utilization_target: The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. """ if predictive_method is not None: pulumi.set(__self__, "predictive_method", predictive_method) if utilization_target is not None: pulumi.set(__self__, "utilization_target", utilization_target) @property @pulumi.getter(name="predictiveMethod") def predictive_method(self) -> Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']]: """ Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. """ return pulumi.get(self, "predictive_method") @predictive_method.setter def predictive_method(self, value: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod']]): pulumi.set(self, "predictive_method", value) @property @pulumi.getter(name="utilizationTarget") def utilization_target(self) -> Optional[pulumi.Input[float]]: """ The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. 
""" return pulumi.get(self, "utilization_target") @utilization_target.setter def utilization_target(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "utilization_target", value) @pulumi.input_type class AutoscalingPolicyCustomMetricUtilizationArgs: def __init__(__self__, *, filter: Optional[pulumi.Input[str]] = None, metric: Optional[pulumi.Input[str]] = None, single_instance_assignment: Optional[pulumi.Input[float]] = None, utilization_target: Optional[pulumi.Input[float]] = None, utilization_target_type: Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']] = None): """ Custom utilization metric policy. :param pulumi.Input[str] filter: A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. :param pulumi.Input[str] metric: The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. :param pulumi.Input[float] single_instance_assignment: If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead. :param pulumi.Input[float] utilization_target: The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. 
For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances. :param pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType'] utilization_target_type: Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. """ if filter is not None: pulumi.set(__self__, "filter", filter) if metric is not None: pulumi.set(__self__, "metric", metric) if single_instance_assignment is not None: pulumi.set(__self__, "single_instance_assignment", single_instance_assignment) if utilization_target is not None: pulumi.set(__self__, "utilization_target", utilization_target) if utilization_target_type is not None: pulumi.set(__self__, "utilization_target_type", utilization_target_type) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input[str]]: """ A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter", value) @property @pulumi.getter def metric(self) -> Optional[pulumi.Input[str]]: """ The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. """ return pulumi.get(self, "metric") @metric.setter def metric(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "metric", value) @property @pulumi.getter(name="singleInstanceAssignment") def single_instance_assignment(self) -> Optional[pulumi.Input[float]]: """ If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. 
The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead. """ return pulumi.get(self, "single_instance_assignment") @single_instance_assignment.setter def single_instance_assignment(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "single_instance_assignment", value) @property @pulumi.getter(name="utilizationTarget") def utilization_target(self) -> Optional[pulumi.Input[float]]: """ The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances. """ return pulumi.get(self, "utilization_target") @utilization_target.setter def utilization_target(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "utilization_target", value) @property @pulumi.getter(name="utilizationTargetType") def utilization_target_type(self) -> Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']]: """ Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. """ return pulumi.get(self, "utilization_target_type") @utilization_target_type.setter def utilization_target_type(self, value: Optional[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType']]): pulumi.set(self, "utilization_target_type", value) @pulumi.input_type class AutoscalingPolicyLoadBalancingUtilizationArgs: def __init__(__self__, *, utilization_target: Optional[pulumi.Input[float]] = None): """ Configuration parameters of autoscaling based on load balancing. :param pulumi.Input[float] utilization_target: Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8. """ if utilization_target is not None: pulumi.set(__self__, "utilization_target", utilization_target) @property @pulumi.getter(name="utilizationTarget") def utilization_target(self) -> Optional[pulumi.Input[float]]: """ Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8. """ return pulumi.get(self, "utilization_target") @utilization_target.setter def utilization_target(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "utilization_target", value) @pulumi.input_type class AutoscalingPolicyScaleDownControlArgs: def __init__(__self__, *, max_scaled_down_replicas: Optional[pulumi.Input['FixedOrPercentArgs']] = None, time_window_sec: Optional[pulumi.Input[int]] = None): """ Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. 
:param pulumi.Input['FixedOrPercentArgs'] max_scaled_down_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step. :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. """ if max_scaled_down_replicas is not None: pulumi.set(__self__, "max_scaled_down_replicas", max_scaled_down_replicas) if time_window_sec is not None: pulumi.set(__self__, "time_window_sec", time_window_sec) @property @pulumi.getter(name="maxScaledDownReplicas") def max_scaled_down_replicas(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step. """ return pulumi.get(self, "max_scaled_down_replicas") @max_scaled_down_replicas.setter def max_scaled_down_replicas(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_scaled_down_replicas", value) @property @pulumi.getter(name="timeWindowSec") def time_window_sec(self) -> Optional[pulumi.Input[int]]: """ How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. """ return pulumi.get(self, "time_window_sec") @time_window_sec.setter def time_window_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "time_window_sec", value) @pulumi.input_type class AutoscalingPolicyScaleInControlArgs: def __init__(__self__, *, max_scaled_in_replicas: Optional[pulumi.Input['FixedOrPercentArgs']] = None, time_window_sec: Optional[pulumi.Input[int]] = None): """ Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. :param pulumi.Input['FixedOrPercentArgs'] max_scaled_in_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step. :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. """ if max_scaled_in_replicas is not None: pulumi.set(__self__, "max_scaled_in_replicas", max_scaled_in_replicas) if time_window_sec is not None: pulumi.set(__self__, "time_window_sec", time_window_sec) @property @pulumi.getter(name="maxScaledInReplicas") def max_scaled_in_replicas(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once so user service needs to be prepared to lose that many VMs in one step. 
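
        An illustrative sketch limiting scale-in to at most two VMs over a ten-minute window; it assumes FixedOrPercentArgs accepts a fixed input, which is not shown in this file.

            scale_in = AutoscalingPolicyScaleInControlArgs(
                max_scaled_in_replicas=FixedOrPercentArgs(fixed=2),  # assumed field name
                time_window_sec=600,
            )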
""" return pulumi.get(self, "max_scaled_in_replicas") @max_scaled_in_replicas.setter def max_scaled_in_replicas(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_scaled_in_replicas", value) @property @pulumi.getter(name="timeWindowSec") def time_window_sec(self) -> Optional[pulumi.Input[int]]: """ How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. """ return pulumi.get(self, "time_window_sec") @time_window_sec.setter def time_window_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "time_window_sec", value) @pulumi.input_type class AutoscalingPolicyArgs: def __init__(__self__, *, cool_down_period_sec: Optional[pulumi.Input[int]] = None, cpu_utilization: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']] = None, custom_metric_utilizations: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]] = None, load_balancing_utilization: Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']] = None, max_num_replicas: Optional[pulumi.Input[int]] = None, min_num_replicas: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['AutoscalingPolicyMode']] = None, scale_down_control: Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']] = None, scale_in_control: Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']] = None, scaling_schedules: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Cloud Autoscaler policy. :param pulumi.Input[int] cool_down_period_sec: The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. :param pulumi.Input['AutoscalingPolicyCpuUtilizationArgs'] cpu_utilization: Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. :param pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]] custom_metric_utilizations: Configuration parameters of autoscaling based on a custom metric. :param pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs'] load_balancing_utilization: Configuration parameters of autoscaling based on load balancer. :param pulumi.Input[int] max_num_replicas: The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than minimal number of replicas. :param pulumi.Input[int] min_num_replicas: The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, autoscaler chooses a default value depending on maximum number of instances allowed. :param pulumi.Input['AutoscalingPolicyMode'] mode: Defines operating mode for this policy. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] scaling_schedules: Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. 
Up to 128 scaling schedules are allowed. """ if cool_down_period_sec is not None: pulumi.set(__self__, "cool_down_period_sec", cool_down_period_sec) if cpu_utilization is not None: pulumi.set(__self__, "cpu_utilization", cpu_utilization) if custom_metric_utilizations is not None: pulumi.set(__self__, "custom_metric_utilizations", custom_metric_utilizations) if load_balancing_utilization is not None: pulumi.set(__self__, "load_balancing_utilization", load_balancing_utilization) if max_num_replicas is not None: pulumi.set(__self__, "max_num_replicas", max_num_replicas) if min_num_replicas is not None: pulumi.set(__self__, "min_num_replicas", min_num_replicas) if mode is not None: pulumi.set(__self__, "mode", mode) if scale_down_control is not None: pulumi.set(__self__, "scale_down_control", scale_down_control) if scale_in_control is not None: pulumi.set(__self__, "scale_in_control", scale_in_control) if scaling_schedules is not None: pulumi.set(__self__, "scaling_schedules", scaling_schedules) @property @pulumi.getter(name="coolDownPeriodSec") def cool_down_period_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. """ return pulumi.get(self, "cool_down_period_sec") @cool_down_period_sec.setter def cool_down_period_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "cool_down_period_sec", value) @property @pulumi.getter(name="cpuUtilization") def cpu_utilization(self) -> Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']]: """ Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. """ return pulumi.get(self, "cpu_utilization") @cpu_utilization.setter def cpu_utilization(self, value: Optional[pulumi.Input['AutoscalingPolicyCpuUtilizationArgs']]): pulumi.set(self, "cpu_utilization", value) @property @pulumi.getter(name="customMetricUtilizations") def custom_metric_utilizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]]: """ Configuration parameters of autoscaling based on a custom metric. """ return pulumi.get(self, "custom_metric_utilizations") @custom_metric_utilizations.setter def custom_metric_utilizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]]]): pulumi.set(self, "custom_metric_utilizations", value) @property @pulumi.getter(name="loadBalancingUtilization") def load_balancing_utilization(self) -> Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']]: """ Configuration parameters of autoscaling based on load balancer. 
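
        An illustrative sketch of a complete policy combining the fields described above; all values are placeholders.

            policy = AutoscalingPolicyArgs(
                min_num_replicas=2,
                max_num_replicas=10,
                cool_down_period_sec=60,
                cpu_utilization=AutoscalingPolicyCpuUtilizationArgs(utilization_target=0.6),
                load_balancing_utilization=AutoscalingPolicyLoadBalancingUtilizationArgs(utilization_target=0.8),
            )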
""" return pulumi.get(self, "load_balancing_utilization") @load_balancing_utilization.setter def load_balancing_utilization(self, value: Optional[pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs']]): pulumi.set(self, "load_balancing_utilization", value) @property @pulumi.getter(name="maxNumReplicas") def max_num_replicas(self) -> Optional[pulumi.Input[int]]: """ The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than minimal number of replicas. """ return pulumi.get(self, "max_num_replicas") @max_num_replicas.setter def max_num_replicas(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_num_replicas", value) @property @pulumi.getter(name="minNumReplicas") def min_num_replicas(self) -> Optional[pulumi.Input[int]]: """ The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, autoscaler chooses a default value depending on maximum number of instances allowed. """ return pulumi.get(self, "min_num_replicas") @min_num_replicas.setter def min_num_replicas(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_num_replicas", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['AutoscalingPolicyMode']]: """ Defines operating mode for this policy. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['AutoscalingPolicyMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter(name="scaleDownControl") def scale_down_control(self) -> Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']]: return pulumi.get(self, "scale_down_control") @scale_down_control.setter def scale_down_control(self, value: Optional[pulumi.Input['AutoscalingPolicyScaleDownControlArgs']]): pulumi.set(self, "scale_down_control", value) @property @pulumi.getter(name="scaleInControl") def scale_in_control(self) -> Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']]: return pulumi.get(self, "scale_in_control") @scale_in_control.setter def scale_in_control(self, value: Optional[pulumi.Input['AutoscalingPolicyScaleInControlArgs']]): pulumi.set(self, "scale_in_control", value) @property @pulumi.getter(name="scalingSchedules") def scaling_schedules(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. """ return pulumi.get(self, "scaling_schedules") @scaling_schedules.setter def scaling_schedules(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "scaling_schedules", value) @pulumi.input_type class BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None): """ Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting. :param pulumi.Input[str] header_name: The header field name to match on when bypassing cache. Values are case-insensitive. 
""" if header_name is not None: pulumi.set(__self__, "header_name", header_name) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The header field name to match on when bypassing cache. Values are case-insensitive. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @pulumi.input_type class BackendBucketCdnPolicyCacheKeyPolicyArgs: def __init__(__self__, *, include_http_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, query_string_whitelist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. """ if include_http_headers is not None: pulumi.set(__self__, "include_http_headers", include_http_headers) if query_string_whitelist is not None: pulumi.set(__self__, "query_string_whitelist", query_string_whitelist) @property @pulumi.getter(name="includeHttpHeaders") def include_http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP request headers (by name) to be used in the cache key. """ return pulumi.get(self, "include_http_headers") @include_http_headers.setter def include_http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_http_headers", value) @property @pulumi.getter(name="queryStringWhitelist") def query_string_whitelist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_whitelist") @query_string_whitelist.setter def query_string_whitelist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_whitelist", value) @pulumi.input_type class BackendBucketCdnPolicyNegativeCachingPolicyArgs: def __init__(__self__, *, code: Optional[pulumi.Input[int]] = None, ttl: Optional[pulumi.Input[int]] = None): """ Specify CDN TTLs for response error codes. :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once. :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ if code is not None: pulumi.set(__self__, "code", code) if ttl is not None: pulumi.set(__self__, "ttl", ttl) @property @pulumi.getter def code(self) -> Optional[pulumi.Input[int]]: """ The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 are can be specified as values, and you cannot specify a status code more than once. 
""" return pulumi.get(self, "code") @code.setter def code(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "code", value) @property @pulumi.getter def ttl(self) -> Optional[pulumi.Input[int]]: """ The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ttl", value) @pulumi.input_type class BackendBucketCdnPolicyArgs: def __init__(__self__, *, bypass_cache_on_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]] = None, cache_key_policy: Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']] = None, cache_mode: Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']] = None, client_ttl: Optional[pulumi.Input[int]] = None, default_ttl: Optional[pulumi.Input[int]] = None, max_ttl: Optional[pulumi.Input[int]] = None, negative_caching: Optional[pulumi.Input[bool]] = None, negative_caching_policy: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]] = None, request_coalescing: Optional[pulumi.Input[bool]] = None, serve_while_stale: Optional[pulumi.Input[int]] = None, signed_url_cache_max_age_sec: Optional[pulumi.Input[str]] = None): """ Message containing Cloud CDN configuration for a backend bucket. :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendBucketCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. 
If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. :param pulumi.Input[bool] request_coalescing: If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. :param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. 
The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. """ if bypass_cache_on_request_headers is not None: pulumi.set(__self__, "bypass_cache_on_request_headers", bypass_cache_on_request_headers) if cache_key_policy is not None: pulumi.set(__self__, "cache_key_policy", cache_key_policy) if cache_mode is not None: pulumi.set(__self__, "cache_mode", cache_mode) if client_ttl is not None: pulumi.set(__self__, "client_ttl", client_ttl) if default_ttl is not None: pulumi.set(__self__, "default_ttl", default_ttl) if max_ttl is not None: pulumi.set(__self__, "max_ttl", max_ttl) if negative_caching is not None: pulumi.set(__self__, "negative_caching", negative_caching) if negative_caching_policy is not None: pulumi.set(__self__, "negative_caching_policy", negative_caching_policy) if request_coalescing is not None: pulumi.set(__self__, "request_coalescing", request_coalescing) if serve_while_stale is not None: pulumi.set(__self__, "serve_while_stale", serve_while_stale) if signed_url_cache_max_age_sec is not None: pulumi.set(__self__, "signed_url_cache_max_age_sec", signed_url_cache_max_age_sec) @property @pulumi.getter(name="bypassCacheOnRequestHeaders") def bypass_cache_on_request_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]]: """ Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. """ return pulumi.get(self, "bypass_cache_on_request_headers") @bypass_cache_on_request_headers.setter def bypass_cache_on_request_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]]]): pulumi.set(self, "bypass_cache_on_request_headers", value) @property @pulumi.getter(name="cacheKeyPolicy") def cache_key_policy(self) -> Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']]: """ The CacheKeyPolicy for this CdnPolicy. """ return pulumi.get(self, "cache_key_policy") @cache_key_policy.setter def cache_key_policy(self, value: Optional[pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs']]): pulumi.set(self, "cache_key_policy", value) @property @pulumi.getter(name="cacheMode") def cache_mode(self) -> Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']]: """ Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. 
FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. """ return pulumi.get(self, "cache_mode") @cache_mode.setter def cache_mode(self, value: Optional[pulumi.Input['BackendBucketCdnPolicyCacheMode']]): pulumi.set(self, "cache_mode", value) @property @pulumi.getter(name="clientTtl") def client_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). """ return pulumi.get(self, "client_ttl") @client_ttl.setter def client_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "client_ttl", value) @property @pulumi.getter(name="defaultTtl") def default_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "default_ttl") @default_ttl.setter def default_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "default_ttl", value) @property @pulumi.getter(name="maxTtl") def max_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "max_ttl") @max_ttl.setter def max_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ttl", value) @property @pulumi.getter(name="negativeCaching") def negative_caching(self) -> Optional[pulumi.Input[bool]]: """ Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. 
When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. """ return pulumi.get(self, "negative_caching") @negative_caching.setter def negative_caching(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "negative_caching", value) @property @pulumi.getter(name="negativeCachingPolicy") def negative_caching_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]]: """ Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. """ return pulumi.get(self, "negative_caching_policy") @negative_caching_policy.setter def negative_caching_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]]]): pulumi.set(self, "negative_caching_policy", value) @property @pulumi.getter(name="requestCoalescing") def request_coalescing(self) -> Optional[pulumi.Input[bool]]: """ If true, Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. """ return pulumi.get(self, "request_coalescing") @request_coalescing.setter def request_coalescing(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "request_coalescing", value) @property @pulumi.getter(name="serveWhileStale") def serve_while_stale(self) -> Optional[pulumi.Input[int]]: """ Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. """ return pulumi.get(self, "serve_while_stale") @serve_while_stale.setter def serve_while_stale(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "serve_while_stale", value) @property @pulumi.getter(name="signedUrlCacheMaxAgeSec") def signed_url_cache_max_age_sec(self) -> Optional[pulumi.Input[str]]: """ Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s).
When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. """ return pulumi.get(self, "signed_url_cache_max_age_sec") @signed_url_cache_max_age_sec.setter def signed_url_cache_max_age_sec(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "signed_url_cache_max_age_sec", value) @pulumi.input_type class BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None): """ Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting. :param pulumi.Input[str] header_name: The header field name to match on when bypassing cache. Values are case-insensitive. """ if header_name is not None: pulumi.set(__self__, "header_name", header_name) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The header field name to match on when bypassing cache. Values are case-insensitive. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @pulumi.input_type class BackendServiceCdnPolicyNegativeCachingPolicyArgs: def __init__(__self__, *, code: Optional[pulumi.Input[int]] = None, ttl: Optional[pulumi.Input[int]] = None): """ Specify CDN TTLs for response error codes. :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ if code is not None: pulumi.set(__self__, "code", code) if ttl is not None: pulumi.set(__self__, "ttl", ttl) @property @pulumi.getter def code(self) -> Optional[pulumi.Input[int]]: """ The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. """ return pulumi.get(self, "code") @code.setter def code(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "code", value) @property @pulumi.getter def ttl(self) -> Optional[pulumi.Input[int]]: """ The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
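Example (an illustrative editor's sketch, not part of the upstream API reference): reproducing Cloud CDN's documented 120s default for 404 and 410 responses as explicit policy entries:

    negative_caching_policy=[
        BackendServiceCdnPolicyNegativeCachingPolicyArgs(code=404, ttl=120),
        BackendServiceCdnPolicyNegativeCachingPolicyArgs(code=410, ttl=120),
    ]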
""" return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ttl", value) @pulumi.input_type class BackendServiceCdnPolicyArgs: def __init__(__self__, *, bypass_cache_on_request_headers: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]] = None, cache_key_policy: Optional[pulumi.Input['CacheKeyPolicyArgs']] = None, cache_mode: Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']] = None, client_ttl: Optional[pulumi.Input[int]] = None, default_ttl: Optional[pulumi.Input[int]] = None, max_ttl: Optional[pulumi.Input[int]] = None, negative_caching: Optional[pulumi.Input[bool]] = None, negative_caching_policy: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]] = None, request_coalescing: Optional[pulumi.Input[bool]] = None, serve_while_stale: Optional[pulumi.Input[int]] = None, signed_url_cache_max_age_sec: Optional[pulumi.Input[str]] = None): """ Message containing Cloud CDN configuration for a backend service. :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['CacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendServiceCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. 
When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. :param pulumi.Input[bool] request_coalescing: If true, Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. :param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s).
When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. """ if bypass_cache_on_request_headers is not None: pulumi.set(__self__, "bypass_cache_on_request_headers", bypass_cache_on_request_headers) if cache_key_policy is not None: pulumi.set(__self__, "cache_key_policy", cache_key_policy) if cache_mode is not None: pulumi.set(__self__, "cache_mode", cache_mode) if client_ttl is not None: pulumi.set(__self__, "client_ttl", client_ttl) if default_ttl is not None: pulumi.set(__self__, "default_ttl", default_ttl) if max_ttl is not None: pulumi.set(__self__, "max_ttl", max_ttl) if negative_caching is not None: pulumi.set(__self__, "negative_caching", negative_caching) if negative_caching_policy is not None: pulumi.set(__self__, "negative_caching_policy", negative_caching_policy) if request_coalescing is not None: pulumi.set(__self__, "request_coalescing", request_coalescing) if serve_while_stale is not None: pulumi.set(__self__, "serve_while_stale", serve_while_stale) if signed_url_cache_max_age_sec is not None: pulumi.set(__self__, "signed_url_cache_max_age_sec", signed_url_cache_max_age_sec) @property @pulumi.getter(name="bypassCacheOnRequestHeaders") def bypass_cache_on_request_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]]: """ Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. """ return pulumi.get(self, "bypass_cache_on_request_headers") @bypass_cache_on_request_headers.setter def bypass_cache_on_request_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]]]): pulumi.set(self, "bypass_cache_on_request_headers", value) @property @pulumi.getter(name="cacheKeyPolicy") def cache_key_policy(self) -> Optional[pulumi.Input['CacheKeyPolicyArgs']]: """ The CacheKeyPolicy for this CdnPolicy. """ return pulumi.get(self, "cache_key_policy") @cache_key_policy.setter def cache_key_policy(self, value: Optional[pulumi.Input['CacheKeyPolicyArgs']]): pulumi.set(self, "cache_key_policy", value) @property @pulumi.getter(name="cacheMode") def cache_mode(self) -> Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']]: """ Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. 
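Example (an illustrative editor's sketch; the enum member name is assumed to mirror the documented value): caching static assets while letting dynamic responses pass through:

    cdn_policy = BackendServiceCdnPolicyArgs(
        cache_mode=BackendServiceCdnPolicyCacheMode.CACHE_ALL_STATIC,
        default_ttl=3600,   # applied when the origin sets no TTL
        max_ttl=86400,      # caps any origin-supplied TTL at 1 day
    )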
""" return pulumi.get(self, "cache_mode") @cache_mode.setter def cache_mode(self, value: Optional[pulumi.Input['BackendServiceCdnPolicyCacheMode']]): pulumi.set(self, "cache_mode", value) @property @pulumi.getter(name="clientTtl") def client_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). """ return pulumi.get(self, "client_ttl") @client_ttl.setter def client_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "client_ttl", value) @property @pulumi.getter(name="defaultTtl") def default_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "default_ttl") @default_ttl.setter def default_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "default_ttl", value) @property @pulumi.getter(name="maxTtl") def max_ttl(self) -> Optional[pulumi.Input[int]]: """ Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. """ return pulumi.get(self, "max_ttl") @max_ttl.setter def max_ttl(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ttl", value) @property @pulumi.getter(name="negativeCaching") def negative_caching(self) -> Optional[pulumi.Input[bool]]: """ Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. 
By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. """ return pulumi.get(self, "negative_caching") @negative_caching.setter def negative_caching(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "negative_caching", value) @property @pulumi.getter(name="negativeCachingPolicy") def negative_caching_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]]: """ Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. """ return pulumi.get(self, "negative_caching_policy") @negative_caching_policy.setter def negative_caching_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]]]): pulumi.set(self, "negative_caching_policy", value) @property @pulumi.getter(name="requestCoalescing") def request_coalescing(self) -> Optional[pulumi.Input[bool]]: """ If true, Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. """ return pulumi.get(self, "request_coalescing") @request_coalescing.setter def request_coalescing(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "request_coalescing", value) @property @pulumi.getter(name="serveWhileStale") def serve_while_stale(self) -> Optional[pulumi.Input[int]]: """ Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. """ return pulumi.get(self, "serve_while_stale") @serve_while_stale.setter def serve_while_stale(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "serve_while_stale", value) @property @pulumi.getter(name="signedUrlCacheMaxAgeSec") def signed_url_cache_max_age_sec(self) -> Optional[pulumi.Input[str]]: """ Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.
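Example (an illustrative editor's sketch): the field is typed as a string of seconds, so the documented 1 hour default is written as:

    signed_url_cache_max_age_sec="3600"  # 1 hour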
""" return pulumi.get(self, "signed_url_cache_max_age_sec") @signed_url_cache_max_age_sec.setter def signed_url_cache_max_age_sec(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "signed_url_cache_max_age_sec", value) @pulumi.input_type class BackendServiceConnectionTrackingPolicyArgs: def __init__(__self__, *, connection_persistence_on_unhealthy_backends: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']] = None, enable_strong_affinity: Optional[pulumi.Input[bool]] = None, idle_timeout_sec: Optional[pulumi.Input[int]] = None, tracking_mode: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']] = None): """ Connection Tracking configuration for this BackendService. :param pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends'] connection_persistence_on_unhealthy_backends: Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). :param pulumi.Input[bool] enable_strong_affinity: Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. :param pulumi.Input[int] idle_timeout_sec: Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly. :param pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode'] tracking_mode: Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode). 
""" if connection_persistence_on_unhealthy_backends is not None: pulumi.set(__self__, "connection_persistence_on_unhealthy_backends", connection_persistence_on_unhealthy_backends) if enable_strong_affinity is not None: pulumi.set(__self__, "enable_strong_affinity", enable_strong_affinity) if idle_timeout_sec is not None: pulumi.set(__self__, "idle_timeout_sec", idle_timeout_sec) if tracking_mode is not None: pulumi.set(__self__, "tracking_mode", tracking_mode) @property @pulumi.getter(name="connectionPersistenceOnUnhealthyBackends") def connection_persistence_on_unhealthy_backends(self) -> Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']]: """ Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). """ return pulumi.get(self, "connection_persistence_on_unhealthy_backends") @connection_persistence_on_unhealthy_backends.setter def connection_persistence_on_unhealthy_backends(self, value: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends']]): pulumi.set(self, "connection_persistence_on_unhealthy_backends", value) @property @pulumi.getter(name="enableStrongAffinity") def enable_strong_affinity(self) -> Optional[pulumi.Input[bool]]: """ Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. """ return pulumi.get(self, "enable_strong_affinity") @enable_strong_affinity.setter def enable_strong_affinity(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_strong_affinity", value) @property @pulumi.getter(name="idleTimeoutSec") def idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly. 
""" return pulumi.get(self, "idle_timeout_sec") @idle_timeout_sec.setter def idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "idle_timeout_sec", value) @property @pulumi.getter(name="trackingMode") def tracking_mode(self) -> Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']]: """ Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode). """ return pulumi.get(self, "tracking_mode") @tracking_mode.setter def tracking_mode(self, value: Optional[pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode']]): pulumi.set(self, "tracking_mode", value) @pulumi.input_type class BackendServiceFailoverPolicyArgs: def __init__(__self__, *, disable_connection_drain_on_failover: Optional[pulumi.Input[bool]] = None, drop_traffic_if_unhealthy: Optional[pulumi.Input[bool]] = None, failover_ratio: Optional[pulumi.Input[float]] = None): """ For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes). :param pulumi.Input[bool] disable_connection_drain_on_failover: This can be set to true only if the protocol is TCP. The default is false. :param pulumi.Input[bool] drop_traffic_if_unhealthy: If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. :param pulumi.Input[float] failover_ratio: The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). """ if disable_connection_drain_on_failover is not None: pulumi.set(__self__, "disable_connection_drain_on_failover", disable_connection_drain_on_failover) if drop_traffic_if_unhealthy is not None: pulumi.set(__self__, "drop_traffic_if_unhealthy", drop_traffic_if_unhealthy) if failover_ratio is not None: pulumi.set(__self__, "failover_ratio", failover_ratio) @property @pulumi.getter(name="disableConnectionDrainOnFailover") def disable_connection_drain_on_failover(self) -> Optional[pulumi.Input[bool]]: """ This can be set to true only if the protocol is TCP. The default is false. """ return pulumi.get(self, "disable_connection_drain_on_failover") @disable_connection_drain_on_failover.setter def disable_connection_drain_on_failover(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_connection_drain_on_failover", value) @property @pulumi.getter(name="dropTrafficIfUnhealthy") def drop_traffic_if_unhealthy(self) -> Optional[pulumi.Input[bool]]: """ If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy. If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. """ return pulumi.get(self, "drop_traffic_if_unhealthy") @drop_traffic_if_unhealthy.setter def drop_traffic_if_unhealthy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "drop_traffic_if_unhealthy", value) @property @pulumi.getter(name="failoverRatio") def failover_ratio(self) -> Optional[pulumi.Input[float]]: """ The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). """ return pulumi.get(self, "failover_ratio") @failover_ratio.setter def failover_ratio(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "failover_ratio", value) @pulumi.input_type class BackendServiceIAPOAuth2ClientInfoArgs: def __init__(__self__, *, application_name: Optional[pulumi.Input[str]] = None, client_name: Optional[pulumi.Input[str]] = None, developer_email_address: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] application_name: Application name to be used in OAuth consent screen. :param pulumi.Input[str] client_name: Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. :param pulumi.Input[str] developer_email_address: Developer's information to be used in OAuth consent screen.
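Example (an illustrative editor's sketch; all values are placeholders, not real accounts):

    oauth2_client_info = BackendServiceIAPOAuth2ClientInfoArgs(
        application_name="example-app",
        developer_email_address="developer@example.com",  # shown on the consent screen
    )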
""" if application_name is not None: pulumi.set(__self__, "application_name", application_name) if client_name is not None: pulumi.set(__self__, "client_name", client_name) if developer_email_address is not None: pulumi.set(__self__, "developer_email_address", developer_email_address) @property @pulumi.getter(name="applicationName") def application_name(self) -> Optional[pulumi.Input[str]]: """ Application name to be used in OAuth consent screen. """ return pulumi.get(self, "application_name") @application_name.setter def application_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "application_name", value) @property @pulumi.getter(name="clientName") def client_name(self) -> Optional[pulumi.Input[str]]: """ Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. """ return pulumi.get(self, "client_name") @client_name.setter def client_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_name", value) @property @pulumi.getter(name="developerEmailAddress") def developer_email_address(self) -> Optional[pulumi.Input[str]]: """ Developer's information to be used in OAuth consent screen. """ return pulumi.get(self, "developer_email_address") @developer_email_address.setter def developer_email_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "developer_email_address", value) @pulumi.input_type class BackendServiceIAPArgs: def __init__(__self__, *, enabled: Optional[pulumi.Input[bool]] = None, oauth2_client_id: Optional[pulumi.Input[str]] = None, oauth2_client_info: Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']] = None, oauth2_client_secret: Optional[pulumi.Input[str]] = None): """ Identity-Aware Proxy :param pulumi.Input[bool] enabled: Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. :param pulumi.Input[str] oauth2_client_id: OAuth2 client ID to use for the authentication flow. :param pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs'] oauth2_client_info: [Input Only] OAuth client info required to generate client id to be used for IAP. :param pulumi.Input[str] oauth2_client_secret: OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly """ if enabled is not None: pulumi.set(__self__, "enabled", enabled) if oauth2_client_id is not None: pulumi.set(__self__, "oauth2_client_id", oauth2_client_id) if oauth2_client_info is not None: pulumi.set(__self__, "oauth2_client_info", oauth2_client_info) if oauth2_client_secret is not None: pulumi.set(__self__, "oauth2_client_secret", oauth2_client_secret) @property @pulumi.getter def enabled(self) -> Optional[pulumi.Input[bool]]: """ Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. """ return pulumi.get(self, "enabled") @enabled.setter def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) @property @pulumi.getter(name="oauth2ClientId") def oauth2_client_id(self) -> Optional[pulumi.Input[str]]: """ OAuth2 client ID to use for the authentication flow. 
""" return pulumi.get(self, "oauth2_client_id") @oauth2_client_id.setter def oauth2_client_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "oauth2_client_id", value) @property @pulumi.getter(name="oauth2ClientInfo") def oauth2_client_info(self) -> Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']]: """ [Input Only] OAuth client info required to generate client id to be used for IAP. """ return pulumi.get(self, "oauth2_client_info") @oauth2_client_info.setter def oauth2_client_info(self, value: Optional[pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs']]): pulumi.set(self, "oauth2_client_info", value) @property @pulumi.getter(name="oauth2ClientSecret") def oauth2_client_secret(self) -> Optional[pulumi.Input[str]]: """ OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly """ return pulumi.get(self, "oauth2_client_secret") @oauth2_client_secret.setter def oauth2_client_secret(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "oauth2_client_secret", value) @pulumi.input_type class BackendServiceLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, sample_rate: Optional[pulumi.Input[float]] = None): """ The available logging options for the load balancer traffic served by this backend service. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for the load balancer traffic served by this backend service. :param pulumi.Input[float] sample_rate: This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. """ if enable is not None: pulumi.set(__self__, "enable", enable) if sample_rate is not None: pulumi.set(__self__, "sample_rate", sample_rate) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ This field denotes whether to enable logging for the load balancer traffic served by this backend service. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="sampleRate") def sample_rate(self) -> Optional[pulumi.Input[float]]: """ This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. 
""" return pulumi.get(self, "sample_rate") @sample_rate.setter def sample_rate(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "sample_rate", value) @pulumi.input_type class BackendArgs: def __init__(__self__, *, balancing_mode: Optional[pulumi.Input['BackendBalancingMode']] = None, capacity_scaler: Optional[pulumi.Input[float]] = None, description: Optional[pulumi.Input[str]] = None, failover: Optional[pulumi.Input[bool]] = None, group: Optional[pulumi.Input[str]] = None, max_connections: Optional[pulumi.Input[int]] = None, max_connections_per_endpoint: Optional[pulumi.Input[int]] = None, max_connections_per_instance: Optional[pulumi.Input[int]] = None, max_rate: Optional[pulumi.Input[int]] = None, max_rate_per_endpoint: Optional[pulumi.Input[float]] = None, max_rate_per_instance: Optional[pulumi.Input[float]] = None, max_utilization: Optional[pulumi.Input[float]] = None): """ Message containing information of one individual backend. :param pulumi.Input['BackendBalancingMode'] balancing_mode: Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. :param pulumi.Input[float] capacity_scaler: A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[bool] failover: This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. :param pulumi.Input[str] group: The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. :param pulumi.Input[int] max_connections: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_connections_per_endpoint: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. 
:param pulumi.Input[int] max_connections_per_instance: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_rate: Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_endpoint: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_instance: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_utilization: Optional parameter to define a target capacity for the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. """ if balancing_mode is not None: pulumi.set(__self__, "balancing_mode", balancing_mode) if capacity_scaler is not None: pulumi.set(__self__, "capacity_scaler", capacity_scaler) if description is not None: pulumi.set(__self__, "description", description) if failover is not None: pulumi.set(__self__, "failover", failover) if group is not None: pulumi.set(__self__, "group", group) if max_connections is not None: pulumi.set(__self__, "max_connections", max_connections) if max_connections_per_endpoint is not None: pulumi.set(__self__, "max_connections_per_endpoint", max_connections_per_endpoint) if max_connections_per_instance is not None: pulumi.set(__self__, "max_connections_per_instance", max_connections_per_instance) if max_rate is not None: pulumi.set(__self__, "max_rate", max_rate) if max_rate_per_endpoint is not None: pulumi.set(__self__, "max_rate_per_endpoint", max_rate_per_endpoint) if max_rate_per_instance is not None: pulumi.set(__self__, "max_rate_per_instance", max_rate_per_instance) if max_utilization is not None: pulumi.set(__self__, "max_utilization", max_utilization) @property @pulumi.getter(name="balancingMode") def balancing_mode(self) -> Optional[pulumi.Input['BackendBalancingMode']]: """ Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. """ return pulumi.get(self, "balancing_mode") @balancing_mode.setter def balancing_mode(self, value: Optional[pulumi.Input['BackendBalancingMode']]): pulumi.set(self, "balancing_mode", value) @property @pulumi.getter(name="capacityScaler") def capacity_scaler(self) -> Optional[pulumi.Input[float]]: """ A multiplier applied to the backend's target capacity of its balancing mode.
The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid values are 0.0 and the range [0.1, 1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. """ return pulumi.get(self, "capacity_scaler") @capacity_scaler.setter def capacity_scaler(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "capacity_scaler", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def failover(self) -> Optional[pulumi.Input[bool]]: """ This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. """ return pulumi.get(self, "failover") @failover.setter def failover(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "failover", value) @property @pulumi.getter def group(self) -> Optional[pulumi.Input[str]]: """ The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. """ return pulumi.get(self, "group") @group.setter def group(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "group", value) @property @pulumi.getter(name="maxConnections") def max_connections(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. """ return pulumi.get(self, "max_connections") @max_connections.setter def max_connections(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections", value) @property @pulumi.getter(name="maxConnectionsPerEndpoint") def max_connections_per_endpoint(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. """ return pulumi.get(self, "max_connections_per_endpoint") @max_connections_per_endpoint.setter def max_connections_per_endpoint(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections_per_endpoint", value) @property @pulumi.getter(name="maxConnectionsPerInstance") def max_connections_per_instance(self) -> Optional[pulumi.Input[int]]: """ Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE.
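Example (an illustrative editor's sketch; the enum member name is assumed and the instance group URL is a placeholder): a CONNECTION-mode backend capped at 1000 simultaneous connections per instance:

    backend = BackendArgs(
        balancing_mode=BackendBalancingMode.CONNECTION,
        group="https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a/instanceGroups/example-ig",
        max_connections_per_instance=1000,
    )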
""" return pulumi.get(self, "max_connections_per_instance") @max_connections_per_instance.setter def max_connections_per_instance(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections_per_instance", value) @property @pulumi.getter(name="maxRate") def max_rate(self) -> Optional[pulumi.Input[int]]: """ Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate") @max_rate.setter def max_rate(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_rate", value) @property @pulumi.getter(name="maxRatePerEndpoint") def max_rate_per_endpoint(self) -> Optional[pulumi.Input[float]]: """ Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate_per_endpoint") @max_rate_per_endpoint.setter def max_rate_per_endpoint(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_rate_per_endpoint", value) @property @pulumi.getter(name="maxRatePerInstance") def max_rate_per_instance(self) -> Optional[pulumi.Input[float]]: """ Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. """ return pulumi.get(self, "max_rate_per_instance") @max_rate_per_instance.setter def max_rate_per_instance(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_rate_per_instance", value) @property @pulumi.getter(name="maxUtilization") def max_utilization(self) -> Optional[pulumi.Input[float]]: """ Optional parameter to define a target capacity for the UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. """ return pulumi.get(self, "max_utilization") @max_utilization.setter def max_utilization(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_utilization", value) @pulumi.input_type class BindingArgs: def __init__(__self__, *, binding_id: Optional[pulumi.Input[str]] = None, condition: Optional[pulumi.Input['ExprArgs']] = None, members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, role: Optional[pulumi.Input[str]] = None): """ Associates `members`, or principals, with a `role`. :param pulumi.Input[str] binding_id: This is deprecated and has no effect. Do not use. :param pulumi.Input['ExprArgs'] condition: The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). :param pulumi.Input[Sequence[pulumi.Input[str]]] members: Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. :param pulumi.Input[str] role: Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ if binding_id is not None: pulumi.set(__self__, "binding_id", binding_id) if condition is not None: pulumi.set(__self__, "condition", condition) if members is not None: pulumi.set(__self__, "members", members) if role is not None: pulumi.set(__self__, "role", role) @property @pulumi.getter(name="bindingId") def binding_id(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "binding_id") @binding_id.setter def binding_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "binding_id", value) @property @pulumi.getter def condition(self) -> Optional[pulumi.Input['ExprArgs']]: """ The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). """ return pulumi.get(self, "condition") @condition.setter def condition(self, value: Optional[pulumi.Input['ExprArgs']]): pulumi.set(self, "condition", value) @property @pulumi.getter def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. 
* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. """ return pulumi.get(self, "members") @members.setter def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "members", value) @property @pulumi.getter def role(self) -> Optional[pulumi.Input[str]]: """ Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ return pulumi.get(self, "role") @role.setter def role(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "role", value) @pulumi.input_type class CacheKeyPolicyArgs: def __init__(__self__, *, include_host: Optional[pulumi.Input[bool]] = None, include_http_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, include_named_cookies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, include_protocol: Optional[pulumi.Input[bool]] = None, include_query_string: Optional[pulumi.Input[bool]] = None, query_string_blacklist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, query_string_whitelist: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[bool] include_host: If true, requests to different hosts will be cached separately. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_named_cookies: Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. :param pulumi.Input[bool] include_protocol: If true, http and https requests will be cached separately. :param pulumi.Input[bool] include_query_string: If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. 
If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_blacklist: Names of query string parameters to exclude from cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ if include_host is not None: pulumi.set(__self__, "include_host", include_host) if include_http_headers is not None: pulumi.set(__self__, "include_http_headers", include_http_headers) if include_named_cookies is not None: pulumi.set(__self__, "include_named_cookies", include_named_cookies) if include_protocol is not None: pulumi.set(__self__, "include_protocol", include_protocol) if include_query_string is not None: pulumi.set(__self__, "include_query_string", include_query_string) if query_string_blacklist is not None: pulumi.set(__self__, "query_string_blacklist", query_string_blacklist) if query_string_whitelist is not None: pulumi.set(__self__, "query_string_whitelist", query_string_whitelist) @property @pulumi.getter(name="includeHost") def include_host(self) -> Optional[pulumi.Input[bool]]: """ If true, requests to different hosts will be cached separately. """ return pulumi.get(self, "include_host") @include_host.setter def include_host(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_host", value) @property @pulumi.getter(name="includeHttpHeaders") def include_http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP request headers (by name) to be used in the cache key. """ return pulumi.get(self, "include_http_headers") @include_http_headers.setter def include_http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_http_headers", value) @property @pulumi.getter(name="includeNamedCookies") def include_named_cookies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. """ return pulumi.get(self, "include_named_cookies") @include_named_cookies.setter def include_named_cookies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "include_named_cookies", value) @property @pulumi.getter(name="includeProtocol") def include_protocol(self) -> Optional[pulumi.Input[bool]]: """ If true, http and https requests will be cached separately. """ return pulumi.get(self, "include_protocol") @include_protocol.setter def include_protocol(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_protocol", value) @property @pulumi.getter(name="includeQueryString") def include_query_string(self) -> Optional[pulumi.Input[bool]]: """ If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely.
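Example (an illustrative sketch; the "sort" parameter name is a placeholder): include the query string in the cache key, but key only on the "sort" parameter:

    policy = CacheKeyPolicyArgs(
        include_host=True,
        include_query_string=True,
        query_string_whitelist=["sort"])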
""" return pulumi.get(self, "include_query_string") @include_query_string.setter def include_query_string(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "include_query_string", value) @property @pulumi.getter(name="queryStringBlacklist") def query_string_blacklist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_blacklist") @query_string_blacklist.setter def query_string_blacklist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_blacklist", value) @property @pulumi.getter(name="queryStringWhitelist") def query_string_whitelist(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. """ return pulumi.get(self, "query_string_whitelist") @query_string_whitelist.setter def query_string_whitelist(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "query_string_whitelist", value) @pulumi.input_type class CallCredentialsArgs: def __init__(__self__, *, call_credential_type: Optional[pulumi.Input['CallCredentialsCallCredentialType']] = None, from_plugin: Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']] = None): """ [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. :param pulumi.Input['CallCredentialsCallCredentialType'] call_credential_type: The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. :param pulumi.Input['MetadataCredentialsFromPluginArgs'] from_plugin: Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. """ if call_credential_type is not None: pulumi.set(__self__, "call_credential_type", call_credential_type) if from_plugin is not None: pulumi.set(__self__, "from_plugin", from_plugin) @property @pulumi.getter(name="callCredentialType") def call_credential_type(self) -> Optional[pulumi.Input['CallCredentialsCallCredentialType']]: """ The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. """ return pulumi.get(self, "call_credential_type") @call_credential_type.setter def call_credential_type(self, value: Optional[pulumi.Input['CallCredentialsCallCredentialType']]): pulumi.set(self, "call_credential_type", value) @property @pulumi.getter(name="fromPlugin") def from_plugin(self) -> Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']]: """ Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. 
""" return pulumi.get(self, "from_plugin") @from_plugin.setter def from_plugin(self, value: Optional[pulumi.Input['MetadataCredentialsFromPluginArgs']]): pulumi.set(self, "from_plugin", value) @pulumi.input_type class ChannelCredentialsArgs: def __init__(__self__, *, certificates: Optional[pulumi.Input['TlsCertificatePathsArgs']] = None, channel_credential_type: Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']] = None): """ [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. :param pulumi.Input['TlsCertificatePathsArgs'] certificates: The call credentials to access the SDS server. :param pulumi.Input['ChannelCredentialsChannelCredentialType'] channel_credential_type: The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. """ if certificates is not None: pulumi.set(__self__, "certificates", certificates) if channel_credential_type is not None: pulumi.set(__self__, "channel_credential_type", channel_credential_type) @property @pulumi.getter def certificates(self) -> Optional[pulumi.Input['TlsCertificatePathsArgs']]: """ The call credentials to access the SDS server. """ return pulumi.get(self, "certificates") @certificates.setter def certificates(self, value: Optional[pulumi.Input['TlsCertificatePathsArgs']]): pulumi.set(self, "certificates", value) @property @pulumi.getter(name="channelCredentialType") def channel_credential_type(self) -> Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']]: """ The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. """ return pulumi.get(self, "channel_credential_type") @channel_credential_type.setter def channel_credential_type(self, value: Optional[pulumi.Input['ChannelCredentialsChannelCredentialType']]): pulumi.set(self, "channel_credential_type", value) @pulumi.input_type class CircuitBreakersArgs: def __init__(__self__, *, connect_timeout: Optional[pulumi.Input['DurationArgs']] = None, max_connections: Optional[pulumi.Input[int]] = None, max_pending_requests: Optional[pulumi.Input[int]] = None, max_requests: Optional[pulumi.Input[int]] = None, max_requests_per_connection: Optional[pulumi.Input[int]] = None, max_retries: Optional[pulumi.Input[int]] = None): """ Settings controlling the volume of requests, connections and retries to this backend service. :param pulumi.Input['DurationArgs'] connect_timeout: The timeout for new network connections to hosts. :param pulumi.Input[int] max_connections: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_pending_requests: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_requests: The maximum number of parallel requests that allowed to the backend service. If not specified, there is no limit. :param pulumi.Input[int] max_requests_per_connection: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. 
:param pulumi.Input[int] max_retries: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ if connect_timeout is not None: pulumi.set(__self__, "connect_timeout", connect_timeout) if max_connections is not None: pulumi.set(__self__, "max_connections", max_connections) if max_pending_requests is not None: pulumi.set(__self__, "max_pending_requests", max_pending_requests) if max_requests is not None: pulumi.set(__self__, "max_requests", max_requests) if max_requests_per_connection is not None: pulumi.set(__self__, "max_requests_per_connection", max_requests_per_connection) if max_retries is not None: pulumi.set(__self__, "max_retries", max_retries) @property @pulumi.getter(name="connectTimeout") def connect_timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ The timeout for new network connections to hosts. """ return pulumi.get(self, "connect_timeout") @connect_timeout.setter def connect_timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "connect_timeout", value) @property @pulumi.getter(name="maxConnections") def max_connections(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_connections") @max_connections.setter def max_connections(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_connections", value) @property @pulumi.getter(name="maxPendingRequests") def max_pending_requests(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_pending_requests") @max_pending_requests.setter def max_pending_requests(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_pending_requests", value) @property @pulumi.getter(name="maxRequests") def max_requests(self) -> Optional[pulumi.Input[int]]: """ The maximum number of parallel requests that are allowed to the backend service. If not specified, there is no limit. """ return pulumi.get(self, "max_requests") @max_requests.setter def max_requests(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_requests", value) @property @pulumi.getter(name="maxRequestsPerConnection") def max_requests_per_connection(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "max_requests_per_connection") @max_requests_per_connection.setter def max_requests_per_connection(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_requests_per_connection", value) @property @pulumi.getter(name="maxRetries") def max_retries(self) -> Optional[pulumi.Input[int]]: """ Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
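Example (an illustrative sketch; the limits are placeholders): cap the backend service at 1000 connections and 100 pending requests, with a 5-second connect timeout expressed with the DurationArgs type defined later in this module:

    breakers = CircuitBreakersArgs(
        connect_timeout=DurationArgs(seconds="5"),
        max_connections=1000,
        max_pending_requests=100)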
""" return pulumi.get(self, "max_retries") @max_retries.setter def max_retries(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_retries", value) @pulumi.input_type class ConditionArgs: def __init__(__self__, *, iam: Optional[pulumi.Input['ConditionIam']] = None, op: Optional[pulumi.Input['ConditionOp']] = None, svc: Optional[pulumi.Input[str]] = None, sys: Optional[pulumi.Input['ConditionSys']] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionIam'] iam: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionOp'] op: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] svc: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionSys'] sys: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: This is deprecated and has no effect. Do not use. """ if iam is not None: pulumi.set(__self__, "iam", iam) if op is not None: pulumi.set(__self__, "op", op) if svc is not None: pulumi.set(__self__, "svc", svc) if sys is not None: pulumi.set(__self__, "sys", sys) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def iam(self) -> Optional[pulumi.Input['ConditionIam']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "iam") @iam.setter def iam(self, value: Optional[pulumi.Input['ConditionIam']]): pulumi.set(self, "iam", value) @property @pulumi.getter def op(self) -> Optional[pulumi.Input['ConditionOp']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "op") @op.setter def op(self, value: Optional[pulumi.Input['ConditionOp']]): pulumi.set(self, "op", value) @property @pulumi.getter def svc(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "svc") @svc.setter def svc(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "svc", value) @property @pulumi.getter def sys(self) -> Optional[pulumi.Input['ConditionSys']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "sys") @sys.setter def sys(self, value: Optional[pulumi.Input['ConditionSys']]): pulumi.set(self, "sys", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class ConfidentialInstanceConfigArgs: def __init__(__self__, *, enable_confidential_compute: Optional[pulumi.Input[bool]] = None): """ A set of Confidential Instance options. :param pulumi.Input[bool] enable_confidential_compute: Defines whether the instance should have confidential compute enabled. """ if enable_confidential_compute is not None: pulumi.set(__self__, "enable_confidential_compute", enable_confidential_compute) @property @pulumi.getter(name="enableConfidentialCompute") def enable_confidential_compute(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance should have confidential compute enabled. 
""" return pulumi.get(self, "enable_confidential_compute") @enable_confidential_compute.setter def enable_confidential_compute(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_confidential_compute", value) @pulumi.input_type class ConnectionDrainingArgs: def __init__(__self__, *, draining_timeout_sec: Optional[pulumi.Input[int]] = None): """ Message containing connection draining configuration. :param pulumi.Input[int] draining_timeout_sec: Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, as described in Enabling connection draining. """ if draining_timeout_sec is not None: pulumi.set(__self__, "draining_timeout_sec", draining_timeout_sec) @property @pulumi.getter(name="drainingTimeoutSec") def draining_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, as described in Enabling connection draining. """ return pulumi.get(self, "draining_timeout_sec") @draining_timeout_sec.setter def draining_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "draining_timeout_sec", value) @pulumi.input_type class ConsistentHashLoadBalancerSettingsHttpCookieArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, path: Optional[pulumi.Input[str]] = None, ttl: Optional[pulumi.Input['DurationArgs']] = None): """ The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash. :param pulumi.Input[str] name: Name of the cookie. :param pulumi.Input[str] path: Path to set for the cookie. :param pulumi.Input['DurationArgs'] ttl: Lifetime of the cookie. """ if name is not None: pulumi.set(__self__, "name", name) if path is not None: pulumi.set(__self__, "path", path) if ttl is not None: pulumi.set(__self__, "ttl", ttl) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the cookie. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def path(self) -> Optional[pulumi.Input[str]]: """ Path to set for the cookie. """ return pulumi.get(self, "path") @path.setter def path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path", value) @property @pulumi.getter def ttl(self) -> Optional[pulumi.Input['DurationArgs']]: """ Lifetime of the cookie. """ return pulumi.get(self, "ttl") @ttl.setter def ttl(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "ttl", value) @pulumi.input_type class ConsistentHashLoadBalancerSettingsArgs: def __init__(__self__, *, http_cookie: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']] = None, http_header_name: Optional[pulumi.Input[str]] = None, minimum_ring_size: Optional[pulumi.Input[str]] = None): """ This message defines settings for a consistent hash style load balancer. :param pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs'] http_cookie: Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. 
:param pulumi.Input[str] http_header_name: The hash is based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. :param pulumi.Input[str] minimum_ring_size: The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. """ if http_cookie is not None: pulumi.set(__self__, "http_cookie", http_cookie) if http_header_name is not None: pulumi.set(__self__, "http_header_name", http_header_name) if minimum_ring_size is not None: pulumi.set(__self__, "minimum_ring_size", minimum_ring_size) @property @pulumi.getter(name="httpCookie") def http_cookie(self) -> Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']]: """ Hash is based on HTTP Cookie. This field describes an HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_cookie") @http_cookie.setter def http_cookie(self, value: Optional[pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs']]): pulumi.set(self, "http_cookie", value) @property @pulumi.getter(name="httpHeaderName") def http_header_name(self) -> Optional[pulumi.Input[str]]: """ The hash is based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. """ return pulumi.get(self, "http_header_name") @http_header_name.setter def http_header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "http_header_name", value) @property @pulumi.getter(name="minimumRingSize") def minimum_ring_size(self) -> Optional[pulumi.Input[str]]: """ The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. """ return pulumi.get(self, "minimum_ring_size") @minimum_ring_size.setter def minimum_ring_size(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "minimum_ring_size", value) @pulumi.input_type class CorsPolicyArgs: def __init__(__self__, *, allow_credentials: Optional[pulumi.Input[bool]] = None, allow_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_origin_regexes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, allow_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, disabled: Optional[pulumi.Input[bool]] = None, expose_headers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, max_age: Optional[pulumi.Input[int]] = None): """ The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. :param pulumi.Input[bool] allow_credentials: In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header.
Default is false. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_headers: Specifies the content for the Access-Control-Allow-Headers header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_methods: Specifies the content for the Access-Control-Allow-Methods header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origin_regexes: Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origins: Specifies the list of origins that are allowed to do CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[bool] disabled: If true, the setting specifies that the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. :param pulumi.Input[Sequence[pulumi.Input[str]]] expose_headers: Specifies the content for the Access-Control-Expose-Headers header. :param pulumi.Input[int] max_age: Specifies how long, in seconds, the results of a preflight request can be cached. This field translates to the Access-Control-Max-Age header. """ if allow_credentials is not None: pulumi.set(__self__, "allow_credentials", allow_credentials) if allow_headers is not None: pulumi.set(__self__, "allow_headers", allow_headers) if allow_methods is not None: pulumi.set(__self__, "allow_methods", allow_methods) if allow_origin_regexes is not None: pulumi.set(__self__, "allow_origin_regexes", allow_origin_regexes) if allow_origins is not None: pulumi.set(__self__, "allow_origins", allow_origins) if disabled is not None: pulumi.set(__self__, "disabled", disabled) if expose_headers is not None: pulumi.set(__self__, "expose_headers", expose_headers) if max_age is not None: pulumi.set(__self__, "max_age", max_age) @property @pulumi.getter(name="allowCredentials") def allow_credentials(self) -> Optional[pulumi.Input[bool]]: """ In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. Default is false. """ return pulumi.get(self, "allow_credentials") @allow_credentials.setter def allow_credentials(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "allow_credentials", value) @property @pulumi.getter(name="allowHeaders") def allow_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Allow-Headers header. """ return pulumi.get(self, "allow_headers") @allow_headers.setter def allow_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_headers", value) @property @pulumi.getter(name="allowMethods") def allow_methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Allow-Methods header. """ return pulumi.get(self, "allow_methods") @allow_methods.setter def allow_methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_methods", value) @property @pulumi.getter(name="allowOriginRegexes") def allow_origin_regexes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax.
An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. """ return pulumi.get(self, "allow_origin_regexes") @allow_origin_regexes.setter def allow_origin_regexes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_origin_regexes", value) @property @pulumi.getter(name="allowOrigins") def allow_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the list of origins that are allowed to do CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. """ return pulumi.get(self, "allow_origins") @allow_origins.setter def allow_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "allow_origins", value) @property @pulumi.getter def disabled(self) -> Optional[pulumi.Input[bool]]: """ If true, the setting specifies that the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. """ return pulumi.get(self, "disabled") @disabled.setter def disabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disabled", value) @property @pulumi.getter(name="exposeHeaders") def expose_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies the content for the Access-Control-Expose-Headers header. """ return pulumi.get(self, "expose_headers") @expose_headers.setter def expose_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "expose_headers", value) @property @pulumi.getter(name="maxAge") def max_age(self) -> Optional[pulumi.Input[int]]: """ Specifies how long, in seconds, the results of a preflight request can be cached. This field translates to the Access-Control-Max-Age header. """ return pulumi.get(self, "max_age") @max_age.setter def max_age(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_age", value) @pulumi.input_type class CustomerEncryptionKeyArgs: def __init__(__self__, *, kms_key_name: Optional[pulumi.Input[str]] = None, kms_key_service_account: Optional[pulumi.Input[str]] = None, raw_key: Optional[pulumi.Input[str]] = None, rsa_encrypted_key: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] kms_key_name: The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key :param pulumi.Input[str] kms_key_service_account: The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ :param pulumi.Input[str] raw_key: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" :param pulumi.Input[str] rsa_encrypted_key: Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey.
For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem """ if kms_key_name is not None: pulumi.set(__self__, "kms_key_name", kms_key_name) if kms_key_service_account is not None: pulumi.set(__self__, "kms_key_service_account", kms_key_service_account) if raw_key is not None: pulumi.set(__self__, "raw_key", raw_key) if rsa_encrypted_key is not None: pulumi.set(__self__, "rsa_encrypted_key", rsa_encrypted_key) @property @pulumi.getter(name="kmsKeyName") def kms_key_name(self) -> Optional[pulumi.Input[str]]: """ The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key """ return pulumi.get(self, "kms_key_name") @kms_key_name.setter def kms_key_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kms_key_name", value) @property @pulumi.getter(name="kmsKeyServiceAccount") def kms_key_service_account(self) -> Optional[pulumi.Input[str]]: """ The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ """ return pulumi.get(self, "kms_key_service_account") @kms_key_service_account.setter def kms_key_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kms_key_service_account", value) @property @pulumi.getter(name="rawKey") def raw_key(self) -> Optional[pulumi.Input[str]]: """ Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" """ return pulumi.get(self, "raw_key") @raw_key.setter def raw_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "raw_key", value) @property @pulumi.getter(name="rsaEncryptedKey") def rsa_encrypted_key(self) -> Optional[pulumi.Input[str]]: """ Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using a RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. 
Gets the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem """ return pulumi.get(self, "rsa_encrypted_key") @rsa_encrypted_key.setter def rsa_encrypted_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rsa_encrypted_key", value) @pulumi.input_type class DeprecationStatusArgs: def __init__(__self__, *, deleted: Optional[pulumi.Input[str]] = None, deprecated: Optional[pulumi.Input[str]] = None, obsolete: Optional[pulumi.Input[str]] = None, replacement: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input['DeprecationStatusState']] = None, state_override: Optional[pulumi.Input['RolloutPolicyArgs']] = None): """ Deprecation status for a public resource. :param pulumi.Input[str] deleted: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] deprecated: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] obsolete: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] replacement: The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. :param pulumi.Input['DeprecationStatusState'] state: The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. :param pulumi.Input['RolloutPolicyArgs'] state_override: The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. """ if deleted is not None: pulumi.set(__self__, "deleted", deleted) if deprecated is not None: pulumi.set(__self__, "deprecated", deprecated) if obsolete is not None: pulumi.set(__self__, "obsolete", obsolete) if replacement is not None: pulumi.set(__self__, "replacement", replacement) if state is not None: pulumi.set(__self__, "state", state) if state_override is not None: pulumi.set(__self__, "state_override", state_override) @property @pulumi.getter def deleted(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. 
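Example (an illustrative sketch; the timestamp and replacement URL are placeholders, and the DEPRECATED member of DeprecationStatusState is assumed):

    status = DeprecationStatusArgs(
        state=DeprecationStatusState.DEPRECATED,
        deprecated="2024-01-01T00:00:00Z",
        replacement="https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-new-image")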
""" return pulumi.get(self, "deleted") @deleted.setter def deleted(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deleted", value) @property @pulumi.getter def deprecated(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. """ return pulumi.get(self, "deprecated") @deprecated.setter def deprecated(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "deprecated", value) @property @pulumi.getter def obsolete(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. """ return pulumi.get(self, "obsolete") @obsolete.setter def obsolete(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "obsolete", value) @property @pulumi.getter def replacement(self) -> Optional[pulumi.Input[str]]: """ The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. """ return pulumi.get(self, "replacement") @replacement.setter def replacement(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "replacement", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input['DeprecationStatusState']]: """ The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. """ return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input['DeprecationStatusState']]): pulumi.set(self, "state", value) @property @pulumi.getter(name="stateOverride") def state_override(self) -> Optional[pulumi.Input['RolloutPolicyArgs']]: """ The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. """ return pulumi.get(self, "state_override") @state_override.setter def state_override(self, value: Optional[pulumi.Input['RolloutPolicyArgs']]): pulumi.set(self, "state_override", value) @pulumi.input_type class DiskInstantiationConfigArgs: def __init__(__self__, *, auto_delete: Optional[pulumi.Input[bool]] = None, custom_image: Optional[pulumi.Input[str]] = None, device_name: Optional[pulumi.Input[str]] = None, instantiate_from: Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']] = None): """ A specification of the desired way to instantiate a disk in the instance template when its created from a source instance. 
:param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[str] custom_image: The custom source image to be used to restore this disk when instantiating this instance template. :param pulumi.Input[str] device_name: Specifies the device name of the disk to which the configurations apply. :param pulumi.Input['DiskInstantiationConfigInstantiateFrom'] instantiate_from: Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. """ if auto_delete is not None: pulumi.set(__self__, "auto_delete", auto_delete) if custom_image is not None: pulumi.set(__self__, "custom_image", custom_image) if device_name is not None: pulumi.set(__self__, "device_name", device_name) if instantiate_from is not None: pulumi.set(__self__, "instantiate_from", instantiate_from) @property @pulumi.getter(name="autoDelete") def auto_delete(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). """ return pulumi.get(self, "auto_delete") @auto_delete.setter def auto_delete(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_delete", value) @property @pulumi.getter(name="customImage") def custom_image(self) -> Optional[pulumi.Input[str]]: """ The custom source image to be used to restore this disk when instantiating this instance template. """ return pulumi.get(self, "custom_image") @custom_image.setter def custom_image(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_image", value) @property @pulumi.getter(name="deviceName") def device_name(self) -> Optional[pulumi.Input[str]]: """ Specifies the device name of the disk to which the configurations apply. """ return pulumi.get(self, "device_name") @device_name.setter def device_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "device_name", value) @property @pulumi.getter(name="instantiateFrom") def instantiate_from(self) -> Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']]: """ Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template.
Applicable to additional read-write disks, local SSDs, and read-only disks. """ return pulumi.get(self, "instantiate_from") @instantiate_from.setter def instantiate_from(self, value: Optional[pulumi.Input['DiskInstantiationConfigInstantiateFrom']]): pulumi.set(self, "instantiate_from", value) @pulumi.input_type class DisplayDeviceArgs: def __init__(__self__, *, enable_display: Optional[pulumi.Input[bool]] = None): """ A set of Display Device options :param pulumi.Input[bool] enable_display: Defines whether the instance has Display enabled. """ if enable_display is not None: pulumi.set(__self__, "enable_display", enable_display) @property @pulumi.getter(name="enableDisplay") def enable_display(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Display enabled. """ return pulumi.get(self, "enable_display") @enable_display.setter def enable_display(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_display", value) @pulumi.input_type class DistributionPolicyZoneConfigurationArgs: def __init__(__self__, *, zone: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] zone: The URL of the zone. The zone must exist in the region where the managed instance group is located. """ if zone is not None: pulumi.set(__self__, "zone", zone) @property @pulumi.getter def zone(self) -> Optional[pulumi.Input[str]]: """ The URL of the zone. The zone must exist in the region where the managed instance group is located. """ return pulumi.get(self, "zone") @zone.setter def zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "zone", value) @pulumi.input_type class DistributionPolicyArgs: def __init__(__self__, *, target_shape: Optional[pulumi.Input['DistributionPolicyTargetShape']] = None, zones: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]] = None): """ :param pulumi.Input['DistributionPolicyTargetShape'] target_shape: The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType). :param pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]] zones: Zones where the regional managed instance group will create and manage its instances. """ if target_shape is not None: pulumi.set(__self__, "target_shape", target_shape) if zones is not None: pulumi.set(__self__, "zones", zones) @property @pulumi.getter(name="targetShape") def target_shape(self) -> Optional[pulumi.Input['DistributionPolicyTargetShape']]: """ The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType). """ return pulumi.get(self, "target_shape") @target_shape.setter def target_shape(self, value: Optional[pulumi.Input['DistributionPolicyTargetShape']]): pulumi.set(self, "target_shape", value) @property @pulumi.getter def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]]: """ Zones where the regional managed instance group will create and manage its instances. 
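Example (an illustrative sketch; the zone URLs are placeholders): spread a regional managed instance group across two zones:

    policy = DistributionPolicyArgs(zones=[
        DistributionPolicyZoneConfigurationArgs(zone="https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a"),
        DistributionPolicyZoneConfigurationArgs(zone="https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-b")])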
""" return pulumi.get(self, "zones") @zones.setter def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]]]): pulumi.set(self, "zones", value) @pulumi.input_type class DurationArgs: def __init__(__self__, *, nanos: Optional[pulumi.Input[int]] = None, seconds: Optional[pulumi.Input[str]] = None): """ A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. :param pulumi.Input[int] nanos: Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. :param pulumi.Input[str] seconds: Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years """ if nanos is not None: pulumi.set(__self__, "nanos", nanos) if seconds is not None: pulumi.set(__self__, "seconds", seconds) @property @pulumi.getter def nanos(self) -> Optional[pulumi.Input[int]]: """ Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. """ return pulumi.get(self, "nanos") @nanos.setter def nanos(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "nanos", value) @property @pulumi.getter def seconds(self) -> Optional[pulumi.Input[str]]: """ Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years """ return pulumi.get(self, "seconds") @seconds.setter def seconds(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "seconds", value) @pulumi.input_type class ExprArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, expression: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None): """ Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. :param pulumi.Input[str] description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. 
:param pulumi.Input[str] expression: Textual representation of an expression in Common Expression Language syntax. :param pulumi.Input[str] location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. :param pulumi.Input[str] title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. """ if description is not None: pulumi.set(__self__, "description", description) if expression is not None: pulumi.set(__self__, "expression", expression) if location is not None: pulumi.set(__self__, "location", location) if title is not None: pulumi.set(__self__, "title", title) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def expression(self) -> Optional[pulumi.Input[str]]: """ Textual representation of an expression in Common Expression Language syntax. """ return pulumi.get(self, "expression") @expression.setter def expression(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expression", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def title(self) -> Optional[pulumi.Input[str]]: """ Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. """ return pulumi.get(self, "title") @title.setter def title(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "title", value) @pulumi.input_type class ExternalVpnGatewayInterfaceArgs: def __init__(__self__, *, id: Optional[pulumi.Input[int]] = None, ip_address: Optional[pulumi.Input[str]] = None): """ The interface for the external VPN gateway. :param pulumi.Input[int] id: The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 :param pulumi.Input[str] ip_address: IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. """ if id is not None: pulumi.set(__self__, "id", id) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[int]]: """ The numeric ID of this interface. 
The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "id", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @pulumi.input_type class FileContentBufferArgs: def __init__(__self__, *, content: Optional[pulumi.Input[str]] = None, file_type: Optional[pulumi.Input['FileContentBufferFileType']] = None): """ :param pulumi.Input[str] content: The raw content in the secure keys file. :param pulumi.Input['FileContentBufferFileType'] file_type: The file type of source file. """ if content is not None: pulumi.set(__self__, "content", content) if file_type is not None: pulumi.set(__self__, "file_type", file_type) @property @pulumi.getter def content(self) -> Optional[pulumi.Input[str]]: """ The raw content in the secure keys file. """ return pulumi.get(self, "content") @content.setter def content(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "content", value) @property @pulumi.getter(name="fileType") def file_type(self) -> Optional[pulumi.Input['FileContentBufferFileType']]: """ The file type of source file. """ return pulumi.get(self, "file_type") @file_type.setter def file_type(self, value: Optional[pulumi.Input['FileContentBufferFileType']]): pulumi.set(self, "file_type", value) @pulumi.input_type class FirewallAllowedItemArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. 
This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallDeniedItemArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, metadata: Optional[pulumi.Input['FirewallLogConfigMetadata']] = None): """ The available logging options for a firewall rule. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for a particular firewall rule. :param pulumi.Input['FirewallLogConfigMetadata'] metadata: This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. """ if enable is not None: pulumi.set(__self__, "enable", enable) if metadata is not None: pulumi.set(__self__, "metadata", metadata) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ This field denotes whether to enable logging for a particular firewall rule. 
""" return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['FirewallLogConfigMetadata']]: """ This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['FirewallLogConfigMetadata']]): pulumi.set(self, "metadata", value) @pulumi.input_type class FirewallPolicyAssociationArgs: def __init__(__self__, *, attachment_target: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] attachment_target: The target that the firewall policy is attached to. :param pulumi.Input[str] name: The name for an association. """ if attachment_target is not None: pulumi.set(__self__, "attachment_target", attachment_target) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="attachmentTarget") def attachment_target(self) -> Optional[pulumi.Input[str]]: """ The target that the firewall policy is attached to. """ return pulumi.get(self, "attachment_target") @attachment_target.setter def attachment_target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "attachment_target", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for an association. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class FirewallPolicyRuleMatcherLayer4ConfigArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class FirewallPolicyRuleMatcherArgs: def __init__(__self__, *, dest_address_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_region_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, layer4_configs: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]] = None, src_address_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_fqdns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_region_codes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, src_secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]] = None): """ Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_address_groups: Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_region_codes: Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_address_groups: Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_region_codes: Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] src_secure_tags: List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. 
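        Example (an illustrative sketch; the CIDR range and port below are placeholders, not defaults): a matcher for TCP traffic on port 22 from a single source range could be built as

            FirewallPolicyRuleMatcherArgs(
                src_ip_ranges=["10.0.0.0/8"],
                layer4_configs=[FirewallPolicyRuleMatcherLayer4ConfigArgs(
                    ip_protocol="tcp",
                    ports=["22"],
                )],
            )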
""" if dest_address_groups is not None: pulumi.set(__self__, "dest_address_groups", dest_address_groups) if dest_fqdns is not None: pulumi.set(__self__, "dest_fqdns", dest_fqdns) if dest_ip_ranges is not None: pulumi.set(__self__, "dest_ip_ranges", dest_ip_ranges) if dest_region_codes is not None: pulumi.set(__self__, "dest_region_codes", dest_region_codes) if layer4_configs is not None: pulumi.set(__self__, "layer4_configs", layer4_configs) if src_address_groups is not None: pulumi.set(__self__, "src_address_groups", src_address_groups) if src_fqdns is not None: pulumi.set(__self__, "src_fqdns", src_fqdns) if src_ip_ranges is not None: pulumi.set(__self__, "src_ip_ranges", src_ip_ranges) if src_region_codes is not None: pulumi.set(__self__, "src_region_codes", src_region_codes) if src_secure_tags is not None: pulumi.set(__self__, "src_secure_tags", src_secure_tags) @property @pulumi.getter(name="destAddressGroups") def dest_address_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. """ return pulumi.get(self, "dest_address_groups") @dest_address_groups.setter def dest_address_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_address_groups", value) @property @pulumi.getter(name="destFqdns") def dest_fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 1000. """ return pulumi.get(self, "dest_fqdns") @dest_fqdns.setter def dest_fqdns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_fqdns", value) @property @pulumi.getter(name="destIpRanges") def dest_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. """ return pulumi.get(self, "dest_ip_ranges") @dest_ip_ranges.setter def dest_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_ip_ranges", value) @property @pulumi.getter(name="destRegionCodes") def dest_region_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of dest region codes allowed is 5000. """ return pulumi.get(self, "dest_region_codes") @dest_region_codes.setter def dest_region_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_region_codes", value) @property @pulumi.getter(name="layer4Configs") def layer4_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. """ return pulumi.get(self, "layer4_configs") @layer4_configs.setter def layer4_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]]]): pulumi.set(self, "layer4_configs", value) @property @pulumi.getter(name="srcAddressGroups") def src_address_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. 
""" return pulumi.get(self, "src_address_groups") @src_address_groups.setter def src_address_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_address_groups", value) @property @pulumi.getter(name="srcFqdns") def src_fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 1000. """ return pulumi.get(self, "src_fqdns") @src_fqdns.setter def src_fqdns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_fqdns", value) @property @pulumi.getter(name="srcIpRanges") def src_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. """ return pulumi.get(self, "src_ip_ranges") @src_ip_ranges.setter def src_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_ip_ranges", value) @property @pulumi.getter(name="srcRegionCodes") def src_region_codes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number of source region codes allowed is 5000. """ return pulumi.get(self, "src_region_codes") @src_region_codes.setter def src_region_codes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_region_codes", value) @property @pulumi.getter(name="srcSecureTags") def src_secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]: """ List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. """ return pulumi.get(self, "src_secure_tags") @src_secure_tags.setter def src_secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]): pulumi.set(self, "src_secure_tags", value) @pulumi.input_type class FirewallPolicyRuleSecureTagArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: Name of the secure tag, created with TagManager's TagValue API. """ if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the secure tag, created with TagManager's TagValue API. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class FirewallPolicyRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, direction: Optional[pulumi.Input['FirewallPolicyRuleDirection']] = None, disabled: Optional[pulumi.Input[bool]] = None, enable_logging: Optional[pulumi.Input[bool]] = None, match: Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']] = None, priority: Optional[pulumi.Input[int]] = None, target_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, target_secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]] = None, target_service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny()" where valid values for status are 403, 404, and 502. :param pulumi.Input[str] description: An optional description for this resource. :param pulumi.Input['FirewallPolicyRuleDirection'] direction: The direction in which this rule applies. :param pulumi.Input[bool] disabled: Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. :param pulumi.Input['FirewallPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] target_secure_tags: A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances that are applied with this rule. 
""" if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if direction is not None: pulumi.set(__self__, "direction", direction) if disabled is not None: pulumi.set(__self__, "disabled", disabled) if enable_logging is not None: pulumi.set(__self__, "enable_logging", enable_logging) if match is not None: pulumi.set(__self__, "match", match) if priority is not None: pulumi.set(__self__, "priority", priority) if target_resources is not None: pulumi.set(__self__, "target_resources", target_resources) if target_secure_tags is not None: pulumi.set(__self__, "target_secure_tags", target_secure_tags) if target_service_accounts is not None: pulumi.set(__self__, "target_service_accounts", target_service_accounts) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny()" where valid values for status are 403, 404, and 502. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description for this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input['FirewallPolicyRuleDirection']]: """ The direction in which this rule applies. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input['FirewallPolicyRuleDirection']]): pulumi.set(self, "direction", value) @property @pulumi.getter def disabled(self) -> Optional[pulumi.Input[bool]]: """ Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. """ return pulumi.get(self, "disabled") @disabled.setter def disabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disabled", value) @property @pulumi.getter(name="enableLogging") def enable_logging(self) -> Optional[pulumi.Input[bool]]: """ Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. """ return pulumi.get(self, "enable_logging") @enable_logging.setter def enable_logging(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_logging", value) @property @pulumi.getter def match(self) -> Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']]: """ A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. """ return pulumi.get(self, "match") @match.setter def match(self, value: Optional[pulumi.Input['FirewallPolicyRuleMatcherArgs']]): pulumi.set(self, "match", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority. 
""" return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="targetResources") def target_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. """ return pulumi.get(self, "target_resources") @target_resources.setter def target_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_resources", value) @property @pulumi.getter(name="targetSecureTags") def target_secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]: """ A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE state, then this rule will be ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. """ return pulumi.get(self, "target_secure_tags") @target_secure_tags.setter def target_secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]]]): pulumi.set(self, "target_secure_tags", value) @property @pulumi.getter(name="targetServiceAccounts") def target_service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of service accounts indicating the sets of instances that are applied with this rule. """ return pulumi.get(self, "target_service_accounts") @target_service_accounts.setter def target_service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_service_accounts", value) @pulumi.input_type class FixedOrPercentArgs: def __init__(__self__, *, fixed: Optional[pulumi.Input[int]] = None, percent: Optional[pulumi.Input[int]] = None): """ Encapsulates numeric value that can be either absolute or relative. :param pulumi.Input[int] fixed: Specifies a fixed number of VM instances. This must be a positive integer. :param pulumi.Input[int] percent: Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. """ if fixed is not None: pulumi.set(__self__, "fixed", fixed) if percent is not None: pulumi.set(__self__, "percent", percent) @property @pulumi.getter def fixed(self) -> Optional[pulumi.Input[int]]: """ Specifies a fixed number of VM instances. This must be a positive integer. """ return pulumi.get(self, "fixed") @fixed.setter def fixed(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "fixed", value) @property @pulumi.getter def percent(self) -> Optional[pulumi.Input[int]]: """ Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%. 
""" return pulumi.get(self, "percent") @percent.setter def percent(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "percent", value) @pulumi.input_type class ForwardingRuleServiceDirectoryRegistrationArgs: def __init__(__self__, *, namespace: Optional[pulumi.Input[str]] = None, service: Optional[pulumi.Input[str]] = None, service_directory_region: Optional[pulumi.Input[str]] = None): """ Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule. :param pulumi.Input[str] namespace: Service Directory namespace to register the forwarding rule under. :param pulumi.Input[str] service: Service Directory service to register the forwarding rule under. :param pulumi.Input[str] service_directory_region: [Optional] Service Directory region to register this global forwarding rule under. Default to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. """ if namespace is not None: pulumi.set(__self__, "namespace", namespace) if service is not None: pulumi.set(__self__, "service", service) if service_directory_region is not None: pulumi.set(__self__, "service_directory_region", service_directory_region) @property @pulumi.getter def namespace(self) -> Optional[pulumi.Input[str]]: """ Service Directory namespace to register the forwarding rule under. """ return pulumi.get(self, "namespace") @namespace.setter def namespace(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "namespace", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Service Directory service to register the forwarding rule under. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="serviceDirectoryRegion") def service_directory_region(self) -> Optional[pulumi.Input[str]]: """ [Optional] Service Directory region to register this global forwarding rule under. Default to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. """ return pulumi.get(self, "service_directory_region") @service_directory_region.setter def service_directory_region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service_directory_region", value) @pulumi.input_type class FutureReservationSpecificSKUPropertiesArgs: def __init__(__self__, *, instance_properties: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']] = None, total_count: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: Properties of the SKU instances being reserved. :param pulumi.Input[str] total_count: Total number of instances for which capacity assurance is requested at a future time period. """ if instance_properties is not None: pulumi.set(__self__, "instance_properties", instance_properties) if total_count is not None: pulumi.set(__self__, "total_count", total_count) @property @pulumi.getter(name="instanceProperties") def instance_properties(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]: """ Properties of the SKU instances being reserved. 
""" return pulumi.get(self, "instance_properties") @instance_properties.setter def instance_properties(self, value: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs']]): pulumi.set(self, "instance_properties", value) @property @pulumi.getter(name="totalCount") def total_count(self) -> Optional[pulumi.Input[str]]: """ Total number of instances for which capacity assurance is requested at a future time period. """ return pulumi.get(self, "total_count") @total_count.setter def total_count(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "total_count", value) @pulumi.input_type class FutureReservationTimeWindowArgs: def __init__(__self__, *, duration: Optional[pulumi.Input['DurationArgs']] = None, end_time: Optional[pulumi.Input[str]] = None, start_time: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] start_time: Start time of the Future Reservation. The start_time is an RFC3339 string. """ if duration is not None: pulumi.set(__self__, "duration", duration) if end_time is not None: pulumi.set(__self__, "end_time", end_time) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter def duration(self) -> Optional[pulumi.Input['DurationArgs']]: return pulumi.get(self, "duration") @duration.setter def duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "duration", value) @property @pulumi.getter(name="endTime") def end_time(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "end_time") @end_time.setter def end_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "end_time", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Start time of the Future Reservation. The start_time is an RFC3339 string. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class GRPCHealthCheckArgs: def __init__(__self__, *, grpc_service_name: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['GRPCHealthCheckPortSpecification']] = None): """ :param pulumi.Input[str] grpc_service_name: The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. :param pulumi.Input[int] port: The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. :param pulumi.Input['GRPCHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields. """ if grpc_service_name is not None: pulumi.set(__self__, "grpc_service_name", grpc_service_name) if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) @property @pulumi.getter(name="grpcServiceName") def grpc_service_name(self) -> Optional[pulumi.Input[str]]: """ The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. """ return pulumi.get(self, "grpc_service_name") @grpc_service_name.setter def grpc_service_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "grpc_service_name", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['GRPCHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['GRPCHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @pulumi.input_type class GrpcServiceConfigArgs: def __init__(__self__, *, call_credentials: Optional[pulumi.Input['CallCredentialsArgs']] = None, channel_credentials: Optional[pulumi.Input['ChannelCredentialsArgs']] = None, target_uri: Optional[pulumi.Input[str]] = None): """ [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. :param pulumi.Input['CallCredentialsArgs'] call_credentials: The call credentials to access the SDS server. :param pulumi.Input['ChannelCredentialsArgs'] channel_credentials: The channel credentials to access the SDS server. :param pulumi.Input[str] target_uri: The target URI of the SDS server. 
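        Example (an illustrative sketch; the target URI below is a placeholder, and this config is deprecated as noted above):

            GrpcServiceConfigArgs(
                target_uri="unix:/var/run/sds/uds_path",
            )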
""" if call_credentials is not None: pulumi.set(__self__, "call_credentials", call_credentials) if channel_credentials is not None: pulumi.set(__self__, "channel_credentials", channel_credentials) if target_uri is not None: pulumi.set(__self__, "target_uri", target_uri) @property @pulumi.getter(name="callCredentials") def call_credentials(self) -> Optional[pulumi.Input['CallCredentialsArgs']]: """ The call credentials to access the SDS server. """ return pulumi.get(self, "call_credentials") @call_credentials.setter def call_credentials(self, value: Optional[pulumi.Input['CallCredentialsArgs']]): pulumi.set(self, "call_credentials", value) @property @pulumi.getter(name="channelCredentials") def channel_credentials(self) -> Optional[pulumi.Input['ChannelCredentialsArgs']]: """ The channel credentials to access the SDS server. """ return pulumi.get(self, "channel_credentials") @channel_credentials.setter def channel_credentials(self, value: Optional[pulumi.Input['ChannelCredentialsArgs']]): pulumi.set(self, "channel_credentials", value) @property @pulumi.getter(name="targetUri") def target_uri(self) -> Optional[pulumi.Input[str]]: """ The target URI of the SDS server. """ return pulumi.get(self, "target_uri") @target_uri.setter def target_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target_uri", value) @pulumi.input_type class GuestOsFeatureArgs: def __init__(__self__, *, type: Optional[pulumi.Input['GuestOsFeatureType']] = None): """ Guest OS features. :param pulumi.Input['GuestOsFeatureType'] type: The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features. """ if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['GuestOsFeatureType']]: """ The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['GuestOsFeatureType']]): pulumi.set(self, "type", value) @pulumi.input_type class HTTP2HealthCheckArgs: def __init__(__self__, *, host: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']] = None, request_path: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None, weight_report_mode: Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']] = None): """ :param pulumi.Input[str] host: The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. 
If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTP2HealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields. :param pulumi.Input['HTTP2HealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTP/2 health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTP2HealthCheckWeightReportMode'] weight_report_mode: Weight report mode. used for weighted Load Balancing. """ if host is not None: pulumi.set(__self__, "host", host) if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) if proxy_header is not None: pulumi.set(__self__, "proxy_header", proxy_header) if request_path is not None: pulumi.set(__self__, "request_path", request_path) if response is not None: pulumi.set(__self__, "response", response) if weight_report_mode is not None: pulumi.set(__self__, "weight_report_mode", weight_report_mode) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['HTTP2HealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. """ return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['HTTP2HealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[pulumi.Input[str]]: """ The request path of the HTTP/2 health check request. The default value is /. """ return pulumi.get(self, "request_path") @request_path.setter def request_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_path", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @property @pulumi.getter(name="weightReportMode") def weight_report_mode(self) -> Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']]: """ Weight report mode. used for weighted Load Balancing. """ return pulumi.get(self, "weight_report_mode") @weight_report_mode.setter def weight_report_mode(self, value: Optional[pulumi.Input['HTTP2HealthCheckWeightReportMode']]): pulumi.set(self, "weight_report_mode", value) @pulumi.input_type class HTTPHealthCheckArgs: def __init__(__self__, *, host: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['HTTPHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['HTTPHealthCheckProxyHeader']] = None, request_path: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None, weight_report_mode: Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']] = None): """ :param pulumi.Input[str] host: The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTPHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields. :param pulumi.Input['HTTPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTP health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTPHealthCheckWeightReportMode'] weight_report_mode: Weight report mode. used for weighted Load Balancing. """ if host is not None: pulumi.set(__self__, "host", host) if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) if proxy_header is not None: pulumi.set(__self__, "proxy_header", proxy_header) if request_path is not None: pulumi.set(__self__, "request_path", request_path) if response is not None: pulumi.set(__self__, "response", response) if weight_report_mode is not None: pulumi.set(__self__, "weight_report_mode", weight_report_mode) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['HTTPHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields. 
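        Example (illustrative sketch, not part of the generated API documentation; the port,
        path, and port-specification values are assumptions, and a plain string is shown where
        the HTTPHealthCheckPortSpecification enum value from this package would normally be passed):

            http_check = HTTPHealthCheckArgs(
                port=8080,
                request_path="/healthz",
                port_specification="USE_FIXED_PORT",
            )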
""" return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['HTTPHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['HTTPHealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. """ return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['HTTPHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[pulumi.Input[str]]: """ The request path of the HTTP health check request. The default value is /. """ return pulumi.get(self, "request_path") @request_path.setter def request_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_path", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @property @pulumi.getter(name="weightReportMode") def weight_report_mode(self) -> Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']]: """ Weight report mode. used for weighted Load Balancing. """ return pulumi.get(self, "weight_report_mode") @weight_report_mode.setter def weight_report_mode(self, value: Optional[pulumi.Input['HTTPHealthCheckWeightReportMode']]): pulumi.set(self, "weight_report_mode", value) @pulumi.input_type class HTTPSHealthCheckArgs: def __init__(__self__, *, host: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']] = None, request_path: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None, weight_report_mode: Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']] = None): """ :param pulumi.Input[str] host: The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTPSHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields. 
:param pulumi.Input['HTTPSHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTPS health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTPSHealthCheckWeightReportMode'] weight_report_mode: Weight report mode. used for weighted Load Balancing. """ if host is not None: pulumi.set(__self__, "host", host) if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) if proxy_header is not None: pulumi.set(__self__, "proxy_header", proxy_header) if request_path is not None: pulumi.set(__self__, "request_path", request_path) if response is not None: pulumi.set(__self__, "response", response) if weight_report_mode is not None: pulumi.set(__self__, "weight_report_mode", weight_report_mode) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['HTTPSHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. 
""" return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['HTTPSHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter(name="requestPath") def request_path(self) -> Optional[pulumi.Input[str]]: """ The request path of the HTTPS health check request. The default value is /. """ return pulumi.get(self, "request_path") @request_path.setter def request_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_path", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @property @pulumi.getter(name="weightReportMode") def weight_report_mode(self) -> Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']]: """ Weight report mode. used for weighted Load Balancing. """ return pulumi.get(self, "weight_report_mode") @weight_report_mode.setter def weight_report_mode(self, value: Optional[pulumi.Input['HTTPSHealthCheckWeightReportMode']]): pulumi.set(self, "weight_report_mode", value) @pulumi.input_type class HealthCheckLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None): """ Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. """ if enable is not None: pulumi.set(__self__, "enable", enable) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @pulumi.input_type class HostRuleArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, path_matcher: Optional[pulumi.Input[str]] = None): """ UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[str] path_matcher: The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion. 
""" if description is not None: pulumi.set(__self__, "description", description) if hosts is not None: pulumi.set(__self__, "hosts", hosts) if path_matcher is not None: pulumi.set(__self__, "path_matcher", path_matcher) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "hosts") @hosts.setter def hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "hosts", value) @property @pulumi.getter(name="pathMatcher") def path_matcher(self) -> Optional[pulumi.Input[str]]: """ The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion. """ return pulumi.get(self, "path_matcher") @path_matcher.setter def path_matcher(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_matcher", value) @pulumi.input_type class HttpFaultAbortArgs: def __init__(__self__, *, http_status: Optional[pulumi.Input[int]] = None, percentage: Optional[pulumi.Input[float]] = None): """ Specification for how requests are aborted as part of fault injection. :param pulumi.Input[int] http_status: The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ if http_status is not None: pulumi.set(__self__, "http_status", http_status) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter(name="httpStatus") def http_status(self) -> Optional[pulumi.Input[int]]: """ The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. """ return pulumi.get(self, "http_status") @http_status.setter def http_status(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "http_status", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[float]]: """ The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. 
""" return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "percentage", value) @pulumi.input_type class HttpFaultDelayArgs: def __init__(__self__, *, fixed_delay: Optional[pulumi.Input['DurationArgs']] = None, percentage: Optional[pulumi.Input[float]] = None): """ Specifies the delay introduced by the load balancer before forwarding the request to the backend service as part of fault injection. :param pulumi.Input['DurationArgs'] fixed_delay: Specifies the value of the fixed delay interval. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ if fixed_delay is not None: pulumi.set(__self__, "fixed_delay", fixed_delay) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter(name="fixedDelay") def fixed_delay(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the value of the fixed delay interval. """ return pulumi.get(self, "fixed_delay") @fixed_delay.setter def fixed_delay(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "fixed_delay", value) @property @pulumi.getter def percentage(self) -> Optional[pulumi.Input[float]]: """ The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. """ return pulumi.get(self, "percentage") @percentage.setter def percentage(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "percentage", value) @pulumi.input_type class HttpFaultInjectionArgs: def __init__(__self__, *, abort: Optional[pulumi.Input['HttpFaultAbortArgs']] = None, delay: Optional[pulumi.Input['HttpFaultDelayArgs']] = None): """ The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by the load balancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. :param pulumi.Input['HttpFaultAbortArgs'] abort: The specification for how client requests are aborted as part of fault injection. :param pulumi.Input['HttpFaultDelayArgs'] delay: The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. """ if abort is not None: pulumi.set(__self__, "abort", abort) if delay is not None: pulumi.set(__self__, "delay", delay) @property @pulumi.getter def abort(self) -> Optional[pulumi.Input['HttpFaultAbortArgs']]: """ The specification for how client requests are aborted as part of fault injection. """ return pulumi.get(self, "abort") @abort.setter def abort(self, value: Optional[pulumi.Input['HttpFaultAbortArgs']]): pulumi.set(self, "abort", value) @property @pulumi.getter def delay(self) -> Optional[pulumi.Input['HttpFaultDelayArgs']]: """ The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. 
""" return pulumi.get(self, "delay") @delay.setter def delay(self, value: Optional[pulumi.Input['HttpFaultDelayArgs']]): pulumi.set(self, "delay", value) @pulumi.input_type class HttpFilterConfigArgs: def __init__(__self__, *, config: Optional[pulumi.Input[str]] = None, config_type_url: Optional[pulumi.Input[str]] = None, filter_name: Optional[pulumi.Input[str]] = None): """ HttpFilterConfiguration supplies additional contextual settings for networkservices.HttpFilter resources enabled by Traffic Director. :param pulumi.Input[str] config: The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl :param pulumi.Input[str] config_type_url: The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct :param pulumi.Input[str] filter_name: Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm """ if config is not None: pulumi.set(__self__, "config", config) if config_type_url is not None: pulumi.set(__self__, "config_type_url", config_type_url) if filter_name is not None: pulumi.set(__self__, "filter_name", filter_name) @property @pulumi.getter def config(self) -> Optional[pulumi.Input[str]]: """ The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl """ return pulumi.get(self, "config") @config.setter def config(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "config", value) @property @pulumi.getter(name="configTypeUrl") def config_type_url(self) -> Optional[pulumi.Input[str]]: """ The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct """ return pulumi.get(self, "config_type_url") @config_type_url.setter def config_type_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "config_type_url", value) @property @pulumi.getter(name="filterName") def filter_name(self) -> Optional[pulumi.Input[str]]: """ Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm """ return pulumi.get(self, "filter_name") @filter_name.setter def filter_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter_name", value) @pulumi.input_type class HttpHeaderActionArgs: def __init__(__self__, *, request_headers_to_add: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]] = None, request_headers_to_remove: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, response_headers_to_add: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]] = None, response_headers_to_remove: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The request and response header transformations that take effect before the request is passed along to the selected backendService. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] request_headers_to_add: Headers to add to a matching request before forwarding the request to the backendService. 
        :param pulumi.Input[Sequence[pulumi.Input[str]]] request_headers_to_remove: A list of header names for headers that need to be removed from the request before forwarding the request to the backendService.
        :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] response_headers_to_add: Headers to add to the response before sending the response back to the client.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] response_headers_to_remove: A list of header names for headers that need to be removed from the response before sending the response back to the client.
        """
        if request_headers_to_add is not None:
            pulumi.set(__self__, "request_headers_to_add", request_headers_to_add)
        if request_headers_to_remove is not None:
            pulumi.set(__self__, "request_headers_to_remove", request_headers_to_remove)
        if response_headers_to_add is not None:
            pulumi.set(__self__, "response_headers_to_add", response_headers_to_add)
        if response_headers_to_remove is not None:
            pulumi.set(__self__, "response_headers_to_remove", response_headers_to_remove)

    @property
    @pulumi.getter(name="requestHeadersToAdd")
    def request_headers_to_add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]:
        """
        Headers to add to a matching request before forwarding the request to the backendService.
        """
        return pulumi.get(self, "request_headers_to_add")

    @request_headers_to_add.setter
    def request_headers_to_add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]):
        pulumi.set(self, "request_headers_to_add", value)

    @property
    @pulumi.getter(name="requestHeadersToRemove")
    def request_headers_to_remove(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of header names for headers that need to be removed from the request before forwarding the request to the backendService.
        """
        return pulumi.get(self, "request_headers_to_remove")

    @request_headers_to_remove.setter
    def request_headers_to_remove(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "request_headers_to_remove", value)

    @property
    @pulumi.getter(name="responseHeadersToAdd")
    def response_headers_to_add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]:
        """
        Headers to add to the response before sending the response back to the client.
        """
        return pulumi.get(self, "response_headers_to_add")

    @response_headers_to_add.setter
    def response_headers_to_add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]]]):
        pulumi.set(self, "response_headers_to_add", value)

    @property
    @pulumi.getter(name="responseHeadersToRemove")
    def response_headers_to_remove(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of header names for headers that need to be removed from the response before sending the response back to the client.
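        Example (illustrative sketch; the header names and values are assumptions for
        demonstration only):

            header_action = HttpHeaderActionArgs(
                request_headers_to_add=[
                    HttpHeaderOptionArgs(header_name="X-Env", header_value="prod", replace=True),
                ],
                response_headers_to_remove=["Server"],
            )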
""" return pulumi.get(self, "response_headers_to_remove") @response_headers_to_remove.setter def response_headers_to_remove(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "response_headers_to_remove", value) @pulumi.input_type class HttpHeaderMatchArgs: def __init__(__self__, *, exact_match: Optional[pulumi.Input[str]] = None, header_name: Optional[pulumi.Input[str]] = None, invert_match: Optional[pulumi.Input[bool]] = None, prefix_match: Optional[pulumi.Input[str]] = None, present_match: Optional[pulumi.Input[bool]] = None, range_match: Optional[pulumi.Input['Int64RangeMatchArgs']] = None, regex_match: Optional[pulumi.Input[str]] = None, suffix_match: Optional[pulumi.Input[str]] = None): """ matchRule criteria for request header matches. :param pulumi.Input[str] exact_match: The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[str] header_name: The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. :param pulumi.Input[bool] invert_match: If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. :param pulumi.Input[str] prefix_match: The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[bool] present_match: A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input['Int64RangeMatchArgs'] range_match: The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. :param pulumi.Input[str] regex_match: The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. 
regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[str] suffix_match: The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ if exact_match is not None: pulumi.set(__self__, "exact_match", exact_match) if header_name is not None: pulumi.set(__self__, "header_name", header_name) if invert_match is not None: pulumi.set(__self__, "invert_match", invert_match) if prefix_match is not None: pulumi.set(__self__, "prefix_match", prefix_match) if present_match is not None: pulumi.set(__self__, "present_match", present_match) if range_match is not None: pulumi.set(__self__, "range_match", range_match) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) if suffix_match is not None: pulumi.set(__self__, "suffix_match", suffix_match) @property @pulumi.getter(name="exactMatch") def exact_match(self) -> Optional[pulumi.Input[str]]: """ The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "exact_match") @exact_match.setter def exact_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "exact_match", value) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="invertMatch") def invert_match(self) -> Optional[pulumi.Input[bool]]: """ If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. """ return pulumi.get(self, "invert_match") @invert_match.setter def invert_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "invert_match", value) @property @pulumi.getter(name="prefixMatch") def prefix_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "prefix_match") @prefix_match.setter def prefix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_match", value) @property @pulumi.getter(name="presentMatch") def present_match(self) -> Optional[pulumi.Input[bool]]: """ A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. 
Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "present_match") @present_match.setter def present_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "present_match", value) @property @pulumi.getter(name="rangeMatch") def range_match(self) -> Optional[pulumi.Input['Int64RangeMatchArgs']]: """ The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. """ return pulumi.get(self, "range_match") @range_match.setter def range_match(self, value: Optional[pulumi.Input['Int64RangeMatchArgs']]): pulumi.set(self, "range_match", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @property @pulumi.getter(name="suffixMatch") def suffix_match(self) -> Optional[pulumi.Input[str]]: """ The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. """ return pulumi.get(self, "suffix_match") @suffix_match.setter def suffix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "suffix_match", value) @pulumi.input_type class HttpHeaderOptionArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None, header_value: Optional[pulumi.Input[str]] = None, replace: Optional[pulumi.Input[bool]] = None): """ Specification determining how headers are added to requests or responses. :param pulumi.Input[str] header_name: The name of the header. :param pulumi.Input[str] header_value: The value of the header to add. :param pulumi.Input[bool] replace: If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. """ if header_name is not None: pulumi.set(__self__, "header_name", header_name) if header_value is not None: pulumi.set(__self__, "header_value", header_value) if replace is not None: pulumi.set(__self__, "replace", replace) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the header. 
""" return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="headerValue") def header_value(self) -> Optional[pulumi.Input[str]]: """ The value of the header to add. """ return pulumi.get(self, "header_value") @header_value.setter def header_value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_value", value) @property @pulumi.getter def replace(self) -> Optional[pulumi.Input[bool]]: """ If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. """ return pulumi.get(self, "replace") @replace.setter def replace(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "replace", value) @pulumi.input_type class HttpQueryParameterMatchArgs: def __init__(__self__, *, exact_match: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, present_match: Optional[pulumi.Input[bool]] = None, regex_match: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch criteria for a request's query parameter. :param pulumi.Input[str] exact_match: The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] name: The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. :param pulumi.Input[bool] present_match: Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] regex_match: The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. """ if exact_match is not None: pulumi.set(__self__, "exact_match", exact_match) if name is not None: pulumi.set(__self__, "name", name) if present_match is not None: pulumi.set(__self__, "present_match", present_match) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) @property @pulumi.getter(name="exactMatch") def exact_match(self) -> Optional[pulumi.Input[str]]: """ The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. """ return pulumi.get(self, "exact_match") @exact_match.setter def exact_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "exact_match", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="presentMatch") def present_match(self) -> Optional[pulumi.Input[bool]]: """ Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. """ return pulumi.get(self, "present_match") @present_match.setter def present_match(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "present_match", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @pulumi.input_type class HttpRedirectActionArgs: def __init__(__self__, *, host_redirect: Optional[pulumi.Input[str]] = None, https_redirect: Optional[pulumi.Input[bool]] = None, path_redirect: Optional[pulumi.Input[str]] = None, prefix_redirect: Optional[pulumi.Input[str]] = None, redirect_response_code: Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']] = None, strip_query: Optional[pulumi.Input[bool]] = None): """ Specifies settings for an HTTP redirect. :param pulumi.Input[str] host_redirect: The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. :param pulumi.Input[bool] https_redirect: If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. :param pulumi.Input[str] path_redirect: The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input[str] prefix_redirect: The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input['HttpRedirectActionRedirectResponseCode'] redirect_response_code: The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. 
:param pulumi.Input[bool] strip_query: If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. """ if host_redirect is not None: pulumi.set(__self__, "host_redirect", host_redirect) if https_redirect is not None: pulumi.set(__self__, "https_redirect", https_redirect) if path_redirect is not None: pulumi.set(__self__, "path_redirect", path_redirect) if prefix_redirect is not None: pulumi.set(__self__, "prefix_redirect", prefix_redirect) if redirect_response_code is not None: pulumi.set(__self__, "redirect_response_code", redirect_response_code) if strip_query is not None: pulumi.set(__self__, "strip_query", strip_query) @property @pulumi.getter(name="hostRedirect") def host_redirect(self) -> Optional[pulumi.Input[str]]: """ The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. """ return pulumi.get(self, "host_redirect") @host_redirect.setter def host_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host_redirect", value) @property @pulumi.getter(name="httpsRedirect") def https_redirect(self) -> Optional[pulumi.Input[bool]]: """ If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. """ return pulumi.get(self, "https_redirect") @https_redirect.setter def https_redirect(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "https_redirect", value) @property @pulumi.getter(name="pathRedirect") def path_redirect(self) -> Optional[pulumi.Input[str]]: """ The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "path_redirect") @path_redirect.setter def path_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_redirect", value) @property @pulumi.getter(name="prefixRedirect") def prefix_redirect(self) -> Optional[pulumi.Input[str]]: """ The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "prefix_redirect") @prefix_redirect.setter def prefix_redirect(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_redirect", value) @property @pulumi.getter(name="redirectResponseCode") def redirect_response_code(self) -> Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']]: """ The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. 
        - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained.
        """
        return pulumi.get(self, "redirect_response_code")

    @redirect_response_code.setter
    def redirect_response_code(self, value: Optional[pulumi.Input['HttpRedirectActionRedirectResponseCode']]):
        pulumi.set(self, "redirect_response_code", value)

    @property
    @pulumi.getter(name="stripQuery")
    def strip_query(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false.
        """
        return pulumi.get(self, "strip_query")

    @strip_query.setter
    def strip_query(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "strip_query", value)


@pulumi.input_type
class HttpRetryPolicyArgs:
    def __init__(__self__, *,
                 num_retries: Optional[pulumi.Input[int]] = None,
                 per_try_timeout: Optional[pulumi.Input['DurationArgs']] = None,
                 retry_conditions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The retry policy associated with HttpRouteRule.
        :param pulumi.Input[int] num_retries: Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1.
        :param pulumi.Input['DurationArgs'] per_try_timeout: Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] retry_conditions: Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.
- cancelled - deadline-exceeded - internal - resource-exhausted - unavailable """ if num_retries is not None: pulumi.set(__self__, "num_retries", num_retries) if per_try_timeout is not None: pulumi.set(__self__, "per_try_timeout", per_try_timeout) if retry_conditions is not None: pulumi.set(__self__, "retry_conditions", retry_conditions) @property @pulumi.getter(name="numRetries") def num_retries(self) -> Optional[pulumi.Input[int]]: """ Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1. """ return pulumi.get(self, "num_retries") @num_retries.setter def num_retries(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "num_retries", value) @property @pulumi.getter(name="perTryTimeout") def per_try_timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field. If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "per_try_timeout") @per_try_timeout.setter def per_try_timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "per_try_timeout", value) @property @pulumi.getter(name="retryConditions") def retry_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true. 
- cancelled - deadline-exceeded - internal - resource-exhausted - unavailable """ return pulumi.get(self, "retry_conditions") @retry_conditions.setter def retry_conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "retry_conditions", value) @pulumi.input_type class HttpRouteActionArgs: def __init__(__self__, *, cors_policy: Optional[pulumi.Input['CorsPolicyArgs']] = None, fault_injection_policy: Optional[pulumi.Input['HttpFaultInjectionArgs']] = None, max_stream_duration: Optional[pulumi.Input['DurationArgs']] = None, request_mirror_policy: Optional[pulumi.Input['RequestMirrorPolicyArgs']] = None, retry_policy: Optional[pulumi.Input['HttpRetryPolicyArgs']] = None, timeout: Optional[pulumi.Input['DurationArgs']] = None, url_rewrite: Optional[pulumi.Input['UrlRewriteArgs']] = None, weighted_backend_services: Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]] = None): """ :param pulumi.Input['CorsPolicyArgs'] cors_policy: The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input['HttpFaultInjectionArgs'] fault_injection_policy: The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy. :param pulumi.Input['DurationArgs'] max_stream_duration: Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input['RequestMirrorPolicyArgs'] request_mirror_policy: Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input['HttpRetryPolicyArgs'] retry_policy: Specifies the retry policy associated with this route. :param pulumi.Input['DurationArgs'] timeout: Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. 
If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input['UrlRewriteArgs'] url_rewrite: The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]] weighted_backend_services: A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. """ if cors_policy is not None: pulumi.set(__self__, "cors_policy", cors_policy) if fault_injection_policy is not None: pulumi.set(__self__, "fault_injection_policy", fault_injection_policy) if max_stream_duration is not None: pulumi.set(__self__, "max_stream_duration", max_stream_duration) if request_mirror_policy is not None: pulumi.set(__self__, "request_mirror_policy", request_mirror_policy) if retry_policy is not None: pulumi.set(__self__, "retry_policy", retry_policy) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if url_rewrite is not None: pulumi.set(__self__, "url_rewrite", url_rewrite) if weighted_backend_services is not None: pulumi.set(__self__, "weighted_backend_services", weighted_backend_services) @property @pulumi.getter(name="corsPolicy") def cors_policy(self) -> Optional[pulumi.Input['CorsPolicyArgs']]: """ The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "cors_policy") @cors_policy.setter def cors_policy(self, value: Optional[pulumi.Input['CorsPolicyArgs']]): pulumi.set(self, "cors_policy", value) @property @pulumi.getter(name="faultInjectionPolicy") def fault_injection_policy(self) -> Optional[pulumi.Input['HttpFaultInjectionArgs']]: """ The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy. 
""" return pulumi.get(self, "fault_injection_policy") @fault_injection_policy.setter def fault_injection_policy(self, value: Optional[pulumi.Input['HttpFaultInjectionArgs']]): pulumi.set(self, "fault_injection_policy", value) @property @pulumi.getter(name="maxStreamDuration") def max_stream_duration(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ return pulumi.get(self, "max_stream_duration") @max_stream_duration.setter def max_stream_duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "max_stream_duration", value) @property @pulumi.getter(name="requestMirrorPolicy") def request_mirror_policy(self) -> Optional[pulumi.Input['RequestMirrorPolicyArgs']]: """ Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. """ return pulumi.get(self, "request_mirror_policy") @request_mirror_policy.setter def request_mirror_policy(self, value: Optional[pulumi.Input['RequestMirrorPolicyArgs']]): pulumi.set(self, "request_mirror_policy", value) @property @pulumi.getter(name="retryPolicy") def retry_policy(self) -> Optional[pulumi.Input['HttpRetryPolicyArgs']]: """ Specifies the retry policy associated with this route. """ return pulumi.get(self, "retry_policy") @retry_policy.setter def retry_policy(self, value: Optional[pulumi.Input['HttpRetryPolicyArgs']]): pulumi.set(self, "retry_policy", value) @property @pulumi.getter def timeout(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "timeout") @timeout.setter def timeout(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "timeout", value) @property @pulumi.getter(name="urlRewrite") def url_rewrite(self) -> Optional[pulumi.Input['UrlRewriteArgs']]: """ The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. 
""" return pulumi.get(self, "url_rewrite") @url_rewrite.setter def url_rewrite(self, value: Optional[pulumi.Input['UrlRewriteArgs']]): pulumi.set(self, "url_rewrite", value) @property @pulumi.getter(name="weightedBackendServices") def weighted_backend_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]]: """ A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. """ return pulumi.get(self, "weighted_backend_services") @weighted_backend_services.setter def weighted_backend_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]]]): pulumi.set(self, "weighted_backend_services", value) @pulumi.input_type class HttpRouteRuleMatchArgs: def __init__(__self__, *, full_path_match: Optional[pulumi.Input[str]] = None, header_matches: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]] = None, ignore_case: Optional[pulumi.Input[bool]] = None, metadata_filters: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]] = None, prefix_match: Optional[pulumi.Input[str]] = None, query_parameter_matches: Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]] = None, regex_match: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur. :param pulumi.Input[str] full_path_match: For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]] header_matches: Specifies a list of header match criteria, all of which must match corresponding headers in the request. :param pulumi.Input[bool] ignore_case: Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]] metadata_filters: Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. 
metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[str] prefix_match: For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]] query_parameter_matches: Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[str] regex_match: For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. """ if full_path_match is not None: pulumi.set(__self__, "full_path_match", full_path_match) if header_matches is not None: pulumi.set(__self__, "header_matches", header_matches) if ignore_case is not None: pulumi.set(__self__, "ignore_case", ignore_case) if metadata_filters is not None: pulumi.set(__self__, "metadata_filters", metadata_filters) if prefix_match is not None: pulumi.set(__self__, "prefix_match", prefix_match) if query_parameter_matches is not None: pulumi.set(__self__, "query_parameter_matches", query_parameter_matches) if regex_match is not None: pulumi.set(__self__, "regex_match", regex_match) @property @pulumi.getter(name="fullPathMatch") def full_path_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. """ return pulumi.get(self, "full_path_match") @full_path_match.setter def full_path_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "full_path_match", value) @property @pulumi.getter(name="headerMatches") def header_matches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]]: """ Specifies a list of header match criteria, all of which must match corresponding headers in the request. """ return pulumi.get(self, "header_matches") @header_matches.setter def header_matches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]]]): pulumi.set(self, "header_matches", value) @property @pulumi.getter(name="ignoreCase") def ignore_case(self) -> Optional[pulumi.Input[bool]]: """ Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. 
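Example (an illustrative sketch only; the path is a placeholder, and only one of prefix_match, full_path_match or regex_match may be set, as described above):

    match = HttpRouteRuleMatchArgs(
        prefix_match='/api/',
        ignore_case=True,
    )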
""" return pulumi.get(self, "ignore_case") @ignore_case.setter def ignore_case(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ignore_case", value) @property @pulumi.getter(name="metadataFilters") def metadata_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]]: """ Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here is applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "metadata_filters") @metadata_filters.setter def metadata_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]]]): pulumi.set(self, "metadata_filters", value) @property @pulumi.getter(name="prefixMatch") def prefix_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. """ return pulumi.get(self, "prefix_match") @prefix_match.setter def prefix_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "prefix_match", value) @property @pulumi.getter(name="queryParameterMatches") def query_parameter_matches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]]: """ Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "query_parameter_matches") @query_parameter_matches.setter def query_parameter_matches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]]]): pulumi.set(self, "query_parameter_matches", value) @property @pulumi.getter(name="regexMatch") def regex_match(self) -> Optional[pulumi.Input[str]]: """ For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
""" return pulumi.get(self, "regex_match") @regex_match.setter def regex_match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "regex_match", value) @pulumi.input_type class HttpRouteRuleArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, http_filter_configs: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]] = None, http_filter_metadata: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]] = None, match_rules: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]] = None, priority: Optional[pulumi.Input[int]] = None, route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, service: Optional[pulumi.Input[str]] = None, url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None): """ The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform. :param pulumi.Input[str] description: The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_configs: Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_metadata: Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]] match_rules: The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. :param pulumi.Input[int] priority: For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. 
You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ if description is not None: pulumi.set(__self__, "description", description) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if http_filter_configs is not None: pulumi.set(__self__, "http_filter_configs", http_filter_configs) if http_filter_metadata is not None: pulumi.set(__self__, "http_filter_metadata", http_filter_metadata) if match_rules is not None: pulumi.set(__self__, "match_rules", match_rules) if priority is not None: pulumi.set(__self__, "priority", priority) if route_action is not None: pulumi.set(__self__, "route_action", route_action) if service is not None: pulumi.set(__self__, "service", service) if url_redirect is not None: pulumi.set(__self__, "url_redirect", url_redirect) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backendService. 
The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter(name="httpFilterConfigs") def http_filter_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]: """ Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_filter_configs") @http_filter_configs.setter def http_filter_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]): pulumi.set(self, "http_filter_configs", value) @property @pulumi.getter(name="httpFilterMetadata") def http_filter_metadata(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]: """ Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "http_filter_metadata") @http_filter_metadata.setter def http_filter_metadata(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]]]): pulumi.set(self, "http_filter_metadata", value) @property @pulumi.getter(name="matchRules") def match_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]]: """ The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. """ return pulumi.get(self, "match_rules") @match_rules.setter def match_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]]]): pulumi.set(self, "match_rules", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. 
Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. """ return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="routeAction") def route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. """ return pulumi.get(self, "route_action") @route_action.setter def route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "route_action", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlRedirect") def url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "url_redirect") @url_redirect.setter def url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "url_redirect", value) @pulumi.input_type class ImageRawDiskArgs: def __init__(__self__, *, container_type: Optional[pulumi.Input['ImageRawDiskContainerType']] = None, source: Optional[pulumi.Input[str]] = None): """ The parameters of the raw disk image. :param pulumi.Input['ImageRawDiskContainerType'] container_type: The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. :param pulumi.Input[str] source: The full Google Cloud Storage URL where the raw disk image archive is stored. 
The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/ image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL """ if container_type is not None: pulumi.set(__self__, "container_type", container_type) if source is not None: pulumi.set(__self__, "source", source) @property @pulumi.getter(name="containerType") def container_type(self) -> Optional[pulumi.Input['ImageRawDiskContainerType']]: """ The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. """ return pulumi.get(self, "container_type") @container_type.setter def container_type(self, value: Optional[pulumi.Input['ImageRawDiskContainerType']]): pulumi.set(self, "container_type", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input[str]]: """ The full Google Cloud Storage URL where the raw disk image archive is stored. The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/ image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source", value) @pulumi.input_type class InitialStateConfigArgs: def __init__(__self__, *, dbs: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, dbxs: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, keks: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]] = None, pk: Optional[pulumi.Input['FileContentBufferArgs']] = None): """ Initial State for shielded instance, these are public keys which are safe to store in public :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbs: The Key Database (db). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbxs: The forbidden key database (dbx). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] keks: The Key Exchange Key (KEK). :param pulumi.Input['FileContentBufferArgs'] pk: The Platform Key (PK). """ if dbs is not None: pulumi.set(__self__, "dbs", dbs) if dbxs is not None: pulumi.set(__self__, "dbxs", dbxs) if keks is not None: pulumi.set(__self__, "keks", keks) if pk is not None: pulumi.set(__self__, "pk", pk) @property @pulumi.getter def dbs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The Key Database (db). """ return pulumi.get(self, "dbs") @dbs.setter def dbs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "dbs", value) @property @pulumi.getter def dbxs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The forbidden key database (dbx). 
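Example (an illustrative sketch of the enclosing InitialStateConfigArgs; the FileContentBufferArgs field names content and file_type are assumed from the Compute API FileContentBuffer message, and the values are placeholders):

    initial_state = InitialStateConfigArgs(
        pk=FileContentBufferArgs(content='<base64-encoded PK certificate>', file_type='X509'),
        dbxs=[FileContentBufferArgs(content='<base64-encoded dbx entry>', file_type='X509')],
    )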
""" return pulumi.get(self, "dbxs") @dbxs.setter def dbxs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "dbxs", value) @property @pulumi.getter def keks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]: """ The Key Exchange Key (KEK). """ return pulumi.get(self, "keks") @keks.setter def keks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]]]): pulumi.set(self, "keks", value) @property @pulumi.getter def pk(self) -> Optional[pulumi.Input['FileContentBufferArgs']]: """ The Platform Key (PK). """ return pulumi.get(self, "pk") @pk.setter def pk(self, value: Optional[pulumi.Input['FileContentBufferArgs']]): pulumi.set(self, "pk", value) @pulumi.input_type class InstanceGroupManagerAllInstancesConfigArgs: def __init__(__self__, *, properties: Optional[pulumi.Input['InstancePropertiesPatchArgs']] = None): """ :param pulumi.Input['InstancePropertiesPatchArgs'] properties: Properties for instances that are created using this instances config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. """ if properties is not None: pulumi.set(__self__, "properties", properties) @property @pulumi.getter def properties(self) -> Optional[pulumi.Input['InstancePropertiesPatchArgs']]: """ Properties for instances that are created using this instances config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. """ return pulumi.get(self, "properties") @properties.setter def properties(self, value: Optional[pulumi.Input['InstancePropertiesPatchArgs']]): pulumi.set(self, "properties", value) @pulumi.input_type class InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs: def __init__(__self__, *, on_health_check: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']] = None): """ :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck'] on_health_check: If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. This is useful for troubleshooting and setting up your health check configuration. """ if on_health_check is not None: pulumi.set(__self__, "on_health_check", on_health_check) @property @pulumi.getter(name="onHealthCheck") def on_health_check(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']]: """ If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. 
This is useful for troubleshooting and setting up your health check configuration. """ return pulumi.get(self, "on_health_check") @on_health_check.setter def on_health_check(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck']]): pulumi.set(self, "on_health_check", value) @pulumi.input_type class InstanceGroupManagerAutoHealingPolicyArgs: def __init__(__self__, *, auto_healing_triggers: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']] = None, health_check: Optional[pulumi.Input[str]] = None, initial_delay_sec: Optional[pulumi.Input[int]] = None, max_unavailable: Optional[pulumi.Input['FixedOrPercentArgs']] = None, update_instances: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']] = None): """ :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs'] auto_healing_triggers: Restricts what triggers autohealing. :param pulumi.Input[str] health_check: The URL for the health check that signals autohealing. :param pulumi.Input[int] initial_delay_sec: The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. """ if auto_healing_triggers is not None: pulumi.set(__self__, "auto_healing_triggers", auto_healing_triggers) if health_check is not None: pulumi.set(__self__, "health_check", health_check) if initial_delay_sec is not None: pulumi.set(__self__, "initial_delay_sec", initial_delay_sec) if max_unavailable is not None: pulumi.set(__self__, "max_unavailable", max_unavailable) if update_instances is not None: pulumi.set(__self__, "update_instances", update_instances) @property @pulumi.getter(name="autoHealingTriggers") def auto_healing_triggers(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']]: """ Restricts what triggers autohealing. """ return pulumi.get(self, "auto_healing_triggers") @auto_healing_triggers.setter def auto_healing_triggers(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs']]): pulumi.set(self, "auto_healing_triggers", value) @property @pulumi.getter(name="healthCheck") def health_check(self) -> Optional[pulumi.Input[str]]: """ The URL for the health check that signals autohealing. 
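Example (an illustrative sketch only; the health check URL is a placeholder, and initial_delay_sec must be in the range [0, 3600] as noted above):

    auto_healing_policy = InstanceGroupManagerAutoHealingPolicyArgs(
        health_check='projects/my-project/global/healthChecks/my-health-check',
        initial_delay_sec=300,
    )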
""" return pulumi.get(self, "health_check") @health_check.setter def health_check(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "health_check", value) @property @pulumi.getter(name="initialDelaySec") def initial_delay_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. """ return pulumi.get(self, "initial_delay_sec") @initial_delay_sec.setter def initial_delay_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "initial_delay_sec", value) @property @pulumi.getter(name="maxUnavailable") def max_unavailable(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. """ return pulumi.get(self, "max_unavailable") @max_unavailable.setter def max_unavailable(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_unavailable", value) @property @pulumi.getter(name="updateInstances") def update_instances(self) -> Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']]: return pulumi.get(self, "update_instances") @update_instances.setter def update_instances(self, value: Optional[pulumi.Input['InstanceGroupManagerAutoHealingPolicyUpdateInstances']]): pulumi.set(self, "update_instances", value) @pulumi.input_type class InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs: def __init__(__self__, *, timeout_sec: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] timeout_sec: The number of seconds to wait for a readiness signal during initialization before timing out. """ if timeout_sec is not None: pulumi.set(__self__, "timeout_sec", timeout_sec) @property @pulumi.getter(name="timeoutSec") def timeout_sec(self) -> Optional[pulumi.Input[int]]: """ The number of seconds to wait for a readiness signal during initialization before timing out. 
""" return pulumi.get(self, "timeout_sec") @timeout_sec.setter def timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "timeout_sec", value) @pulumi.input_type class InstanceGroupManagerInstanceLifecyclePolicyArgs: def __init__(__self__, *, metadata_based_readiness_signal: Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']] = None): """ :param pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs'] metadata_based_readiness_signal: The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. """ if metadata_based_readiness_signal is not None: pulumi.set(__self__, "metadata_based_readiness_signal", metadata_based_readiness_signal) @property @pulumi.getter(name="metadataBasedReadinessSignal") def metadata_based_readiness_signal(self) -> Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']]: """ The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. 
""" return pulumi.get(self, "metadata_based_readiness_signal") @metadata_based_readiness_signal.setter def metadata_based_readiness_signal(self, value: Optional[pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs']]): pulumi.set(self, "metadata_based_readiness_signal", value) @pulumi.input_type class InstanceGroupManagerStandbyPolicyArgs: def __init__(__self__, *, initial_delay_sec: Optional[pulumi.Input[int]] = None): if initial_delay_sec is not None: pulumi.set(__self__, "initial_delay_sec", initial_delay_sec) @property @pulumi.getter(name="initialDelaySec") def initial_delay_sec(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "initial_delay_sec") @initial_delay_sec.setter def initial_delay_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "initial_delay_sec", value) @pulumi.input_type class InstanceGroupManagerUpdatePolicyArgs: def __init__(__self__, *, instance_redistribution_type: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']] = None, max_surge: Optional[pulumi.Input['FixedOrPercentArgs']] = None, max_unavailable: Optional[pulumi.Input['FixedOrPercentArgs']] = None, min_ready_sec: Optional[pulumi.Input[int]] = None, minimal_action: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']] = None, most_disruptive_allowed_action: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']] = None, replacement_method: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']] = None, type: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']] = None): """ :param pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType'] instance_redistribution_type: The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. :param pulumi.Input['FixedOrPercentArgs'] max_surge: The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. 
:param pulumi.Input[int] min_ready_sec: Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600]. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction'] minimal_action: Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction'] most_disruptive_allowed_action: Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. :param pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod'] replacement_method: What action should be used to replace instances. See minimal_action.REPLACE :param pulumi.Input['InstanceGroupManagerUpdatePolicyType'] type: The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). """ if instance_redistribution_type is not None: pulumi.set(__self__, "instance_redistribution_type", instance_redistribution_type) if max_surge is not None: pulumi.set(__self__, "max_surge", max_surge) if max_unavailable is not None: pulumi.set(__self__, "max_unavailable", max_unavailable) if min_ready_sec is not None: pulumi.set(__self__, "min_ready_sec", min_ready_sec) if minimal_action is not None: pulumi.set(__self__, "minimal_action", minimal_action) if most_disruptive_allowed_action is not None: pulumi.set(__self__, "most_disruptive_allowed_action", most_disruptive_allowed_action) if replacement_method is not None: pulumi.set(__self__, "replacement_method", replacement_method) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="instanceRedistributionType") def instance_redistribution_type(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']]: """ The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. """ return pulumi.get(self, "instance_redistribution_type") @instance_redistribution_type.setter def instance_redistribution_type(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType']]): pulumi.set(self, "instance_redistribution_type", value) @property @pulumi.getter(name="maxSurge") def max_surge(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ The maximum number of instances that can be created above the specified targetSize during the update process. 
This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. """ return pulumi.get(self, "max_surge") @max_surge.setter def max_surge(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_surge", value) @property @pulumi.getter(name="maxUnavailable") def max_unavailable(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. """ return pulumi.get(self, "max_unavailable") @max_unavailable.setter def max_unavailable(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "max_unavailable", value) @property @pulumi.getter(name="minReadySec") def min_ready_sec(self) -> Optional[pulumi.Input[int]]: """ Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600]. """ return pulumi.get(self, "min_ready_sec") @min_ready_sec.setter def min_ready_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_ready_sec", value) @property @pulumi.getter(name="minimalAction") def minimal_action(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']]: """ Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. """ return pulumi.get(self, "minimal_action") @minimal_action.setter def minimal_action(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction']]): pulumi.set(self, "minimal_action", value) @property @pulumi.getter(name="mostDisruptiveAllowedAction") def most_disruptive_allowed_action(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']]: """ Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. 
If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. """ return pulumi.get(self, "most_disruptive_allowed_action") @most_disruptive_allowed_action.setter def most_disruptive_allowed_action(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction']]): pulumi.set(self, "most_disruptive_allowed_action", value) @property @pulumi.getter(name="replacementMethod") def replacement_method(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']]: """ What action should be used to replace instances. See minimal_action.REPLACE """ return pulumi.get(self, "replacement_method") @replacement_method.setter def replacement_method(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod']]): pulumi.set(self, "replacement_method", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']]: """ The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyType']]): pulumi.set(self, "type", value) @pulumi.input_type class InstanceGroupManagerVersionArgs: def __init__(__self__, *, instance_template: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, target_size: Optional[pulumi.Input['FixedOrPercentArgs']] = None): """ :param pulumi.Input[str] instance_template: The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. :param pulumi.Input[str] name: Name of the version. Unique among all versions in the scope of this managed instance group. :param pulumi.Input['FixedOrPercentArgs'] target_size: Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information. """ if instance_template is not None: pulumi.set(__self__, "instance_template", instance_template) if name is not None: pulumi.set(__self__, "name", name) if target_size is not None: pulumi.set(__self__, "target_size", target_size) @property @pulumi.getter(name="instanceTemplate") def instance_template(self) -> Optional[pulumi.Input[str]]: """ The URL of the instance template that is specified for this managed instance group. 
The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. """ return pulumi.get(self, "instance_template") @instance_template.setter def instance_template(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "instance_template", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the version. Unique among all versions in the scope of this managed instance group. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="targetSize") def target_size(self) -> Optional[pulumi.Input['FixedOrPercentArgs']]: """ Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information. """ return pulumi.get(self, "target_size") @target_size.setter def target_size(self, value: Optional[pulumi.Input['FixedOrPercentArgs']]): pulumi.set(self, "target_size", value) @pulumi.input_type class InstanceParamsArgs: def __init__(__self__, *, resource_manager_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Additional instance params. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ return pulumi.get(self, "resource_manager_tags") @resource_manager_tags.setter def resource_manager_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "resource_manager_tags", value) @pulumi.input_type class InstancePropertiesPatchArgs: def __init__(__self__, *, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Represents the change that you want to make to the instance properties. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The label key-value pairs that you want to patch onto the instance. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. """ if labels is not None: pulumi.set(__self__, "labels", labels) if metadata is not None: pulumi.set(__self__, "metadata", metadata) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ The label key-value pairs that you want to patch onto the instance. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "metadata", value) @pulumi.input_type class InstancePropertiesArgs: def __init__(__self__, *, advanced_machine_features: Optional[pulumi.Input['AdvancedMachineFeaturesArgs']] = None, can_ip_forward: Optional[pulumi.Input[bool]] = None, confidential_instance_config: Optional[pulumi.Input['ConfidentialInstanceConfigArgs']] = None, description: Optional[pulumi.Input[str]] = None, disks: Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]] = None, display_device: Optional[pulumi.Input['DisplayDeviceArgs']] = None, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]] = None, key_revocation_action_type: Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, machine_type: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input['MetadataArgs']] = None, min_cpu_platform: Optional[pulumi.Input[str]] = None, network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]] = None, network_performance_config: Optional[pulumi.Input['NetworkPerformanceConfigArgs']] = None, post_key_revocation_action_type: Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']] = None, private_ipv6_google_access: Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']] = None, reservation_affinity: Optional[pulumi.Input['ReservationAffinityArgs']] = None, resource_manager_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, scheduling: Optional[pulumi.Input['SchedulingArgs']] = None, secure_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]] = None, shielded_instance_config: Optional[pulumi.Input['ShieldedInstanceConfigArgs']] = None, shielded_vm_config: Optional[pulumi.Input['ShieldedVmConfigArgs']] = None, tags: Optional[pulumi.Input['TagsArgs']] = None): """ :param pulumi.Input['AdvancedMachineFeaturesArgs'] advanced_machine_features: Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. :param pulumi.Input[bool] can_ip_forward: Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. 
If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. :param pulumi.Input['ConfidentialInstanceConfigArgs'] confidential_instance_config: Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. :param pulumi.Input[str] description: An optional text description for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]] disks: An array of disks that are associated with the instances that are created from these properties. :param pulumi.Input['DisplayDeviceArgs'] display_device: Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: A list of guest accelerator cards' type and count to use for instances created from these properties. :param pulumi.Input['InstancePropertiesKeyRevocationActionType'] key_revocation_action_type: KeyRevocationActionType of the instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to instances that are created from these properties. :param pulumi.Input[str] machine_type: The machine type to use for instances that are created from these properties. :param pulumi.Input['MetadataArgs'] metadata: The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. :param pulumi.Input[str] min_cpu_platform: Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]] network_interfaces: An array of network access configurations for this interface. :param pulumi.Input['NetworkPerformanceConfigArgs'] network_performance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['InstancePropertiesPostKeyRevocationActionType'] post_key_revocation_action_type: PostKeyRevocationActionType of the instance. :param pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess'] private_ipv6_google_access: The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. :param pulumi.Input['ReservationAffinityArgs'] reservation_affinity: Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. 
:param pulumi.Input['SchedulingArgs'] scheduling: Specifies the scheduling options for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input[str]]] secure_tags: [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]] service_accounts: A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. :param pulumi.Input['ShieldedInstanceConfigArgs'] shielded_instance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['ShieldedVmConfigArgs'] shielded_vm_config: Specifies the Shielded VM options for the instances that are created from these properties. :param pulumi.Input['TagsArgs'] tags: A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035. """ if advanced_machine_features is not None: pulumi.set(__self__, "advanced_machine_features", advanced_machine_features) if can_ip_forward is not None: pulumi.set(__self__, "can_ip_forward", can_ip_forward) if confidential_instance_config is not None: pulumi.set(__self__, "confidential_instance_config", confidential_instance_config) if description is not None: pulumi.set(__self__, "description", description) if disks is not None: pulumi.set(__self__, "disks", disks) if display_device is not None: pulumi.set(__self__, "display_device", display_device) if guest_accelerators is not None: pulumi.set(__self__, "guest_accelerators", guest_accelerators) if key_revocation_action_type is not None: pulumi.set(__self__, "key_revocation_action_type", key_revocation_action_type) if labels is not None: pulumi.set(__self__, "labels", labels) if machine_type is not None: pulumi.set(__self__, "machine_type", machine_type) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if min_cpu_platform is not None: pulumi.set(__self__, "min_cpu_platform", min_cpu_platform) if network_interfaces is not None: pulumi.set(__self__, "network_interfaces", network_interfaces) if network_performance_config is not None: pulumi.set(__self__, "network_performance_config", network_performance_config) if post_key_revocation_action_type is not None: pulumi.set(__self__, "post_key_revocation_action_type", post_key_revocation_action_type) if private_ipv6_google_access is not None: pulumi.set(__self__, "private_ipv6_google_access", private_ipv6_google_access) if reservation_affinity is not None: pulumi.set(__self__, "reservation_affinity", reservation_affinity) if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) if resource_policies is not None: pulumi.set(__self__, "resource_policies", resource_policies) if scheduling is not None: pulumi.set(__self__, "scheduling", scheduling) if secure_tags is not None: pulumi.set(__self__, "secure_tags", secure_tags) if service_accounts is not None: pulumi.set(__self__, "service_accounts", service_accounts) if shielded_instance_config is not None: pulumi.set(__self__, "shielded_instance_config", shielded_instance_config) if shielded_vm_config is not None: pulumi.set(__self__, "shielded_vm_config", 
shielded_vm_config) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="advancedMachineFeatures") def advanced_machine_features(self) -> Optional[pulumi.Input['AdvancedMachineFeaturesArgs']]: """ Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "advanced_machine_features") @advanced_machine_features.setter def advanced_machine_features(self, value: Optional[pulumi.Input['AdvancedMachineFeaturesArgs']]): pulumi.set(self, "advanced_machine_features", value) @property @pulumi.getter(name="canIpForward") def can_ip_forward(self) -> Optional[pulumi.Input[bool]]: """ Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. """ return pulumi.get(self, "can_ip_forward") @can_ip_forward.setter def can_ip_forward(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "can_ip_forward", value) @property @pulumi.getter(name="confidentialInstanceConfig") def confidential_instance_config(self) -> Optional[pulumi.Input['ConfidentialInstanceConfigArgs']]: """ Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "confidential_instance_config") @confidential_instance_config.setter def confidential_instance_config(self, value: Optional[pulumi.Input['ConfidentialInstanceConfigArgs']]): pulumi.set(self, "confidential_instance_config", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional text description for the instances that are created from these properties. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]]: """ An array of disks that are associated with the instances that are created from these properties. """ return pulumi.get(self, "disks") @disks.setter def disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]]]): pulumi.set(self, "disks", value) @property @pulumi.getter(name="displayDevice") def display_device(self) -> Optional[pulumi.Input['DisplayDeviceArgs']]: """ Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "display_device") @display_device.setter def display_device(self, value: Optional[pulumi.Input['DisplayDeviceArgs']]): pulumi.set(self, "display_device", value) @property @pulumi.getter(name="guestAccelerators") def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]: """ A list of guest accelerator cards' type and count to use for instances created from these properties. 
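Illustrative sketch (an addition to this doc, not generated API text): one way to populate this field using the AcceleratorConfigArgs type defined elsewhere in this module; the accelerator type string is a placeholder and valid values depend on the zone:

    guest_accelerators=[AcceleratorConfigArgs(
        accelerator_count=1,                  # number of cards per instance
        accelerator_type="nvidia-tesla-t4",   # placeholder; zone-dependent
    )]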
""" return pulumi.get(self, "guest_accelerators") @guest_accelerators.setter def guest_accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]): pulumi.set(self, "guest_accelerators", value) @property @pulumi.getter(name="keyRevocationActionType") def key_revocation_action_type(self) -> Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']]: """ KeyRevocationActionType of the instance. """ return pulumi.get(self, "key_revocation_action_type") @key_revocation_action_type.setter def key_revocation_action_type(self, value: Optional[pulumi.Input['InstancePropertiesKeyRevocationActionType']]): pulumi.set(self, "key_revocation_action_type", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to instances that are created from these properties. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="machineType") def machine_type(self) -> Optional[pulumi.Input[str]]: """ The machine type to use for instances that are created from these properties. """ return pulumi.get(self, "machine_type") @machine_type.setter def machine_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "machine_type", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['MetadataArgs']]: """ The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['MetadataArgs']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[pulumi.Input[str]]: """ Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. """ return pulumi.get(self, "min_cpu_platform") @min_cpu_platform.setter def min_cpu_platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "min_cpu_platform", value) @property @pulumi.getter(name="networkInterfaces") def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]]: """ An array of network access configurations for this interface. """ return pulumi.get(self, "network_interfaces") @network_interfaces.setter def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]]]): pulumi.set(self, "network_interfaces", value) @property @pulumi.getter(name="networkPerformanceConfig") def network_performance_config(self) -> Optional[pulumi.Input['NetworkPerformanceConfigArgs']]: """ Note that for MachineImage, this is not supported yet. 
""" return pulumi.get(self, "network_performance_config") @network_performance_config.setter def network_performance_config(self, value: Optional[pulumi.Input['NetworkPerformanceConfigArgs']]): pulumi.set(self, "network_performance_config", value) @property @pulumi.getter(name="postKeyRevocationActionType") def post_key_revocation_action_type(self) -> Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']]: """ PostKeyRevocationActionType of the instance. """ return pulumi.get(self, "post_key_revocation_action_type") @post_key_revocation_action_type.setter def post_key_revocation_action_type(self, value: Optional[pulumi.Input['InstancePropertiesPostKeyRevocationActionType']]): pulumi.set(self, "post_key_revocation_action_type", value) @property @pulumi.getter(name="privateIpv6GoogleAccess") def private_ipv6_google_access(self) -> Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']]: """ The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "private_ipv6_google_access") @private_ipv6_google_access.setter def private_ipv6_google_access(self, value: Optional[pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess']]): pulumi.set(self, "private_ipv6_google_access", value) @property @pulumi.getter(name="reservationAffinity") def reservation_affinity(self) -> Optional[pulumi.Input['ReservationAffinityArgs']]: """ Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "reservation_affinity") @reservation_affinity.setter def reservation_affinity(self, value: Optional[pulumi.Input['ReservationAffinityArgs']]): pulumi.set(self, "reservation_affinity", value) @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. """ return pulumi.get(self, "resource_manager_tags") @resource_manager_tags.setter def resource_manager_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "resource_manager_tags", value) @property @pulumi.getter(name="resourcePolicies") def resource_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "resource_policies") @resource_policies.setter def resource_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "resource_policies", value) @property @pulumi.getter def scheduling(self) -> Optional[pulumi.Input['SchedulingArgs']]: """ Specifies the scheduling options for the instances that are created from these properties. """ return pulumi.get(self, "scheduling") @scheduling.setter def scheduling(self, value: Optional[pulumi.Input['SchedulingArgs']]): pulumi.set(self, "scheduling", value) @property @pulumi.getter(name="secureTags") def secure_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. 
Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "secure_tags") @secure_tags.setter def secure_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "secure_tags", value) @property @pulumi.getter(name="serviceAccounts") def service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]]: """ A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. """ return pulumi.get(self, "service_accounts") @service_accounts.setter def service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]]]): pulumi.set(self, "service_accounts", value) @property @pulumi.getter(name="shieldedInstanceConfig") def shielded_instance_config(self) -> Optional[pulumi.Input['ShieldedInstanceConfigArgs']]: """ Note that for MachineImage, this is not supported yet. """ return pulumi.get(self, "shielded_instance_config") @shielded_instance_config.setter def shielded_instance_config(self, value: Optional[pulumi.Input['ShieldedInstanceConfigArgs']]): pulumi.set(self, "shielded_instance_config", value) @property @pulumi.getter(name="shieldedVmConfig") def shielded_vm_config(self) -> Optional[pulumi.Input['ShieldedVmConfigArgs']]: """ Specifies the Shielded VM options for the instances that are created from these properties. """ return pulumi.get(self, "shielded_vm_config") @shielded_vm_config.setter def shielded_vm_config(self, value: Optional[pulumi.Input['ShieldedVmConfigArgs']]): pulumi.set(self, "shielded_vm_config", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input['TagsArgs']]: """ A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input['TagsArgs']]): pulumi.set(self, "tags", value) @pulumi.input_type class Int64RangeMatchArgs: def __init__(__self__, *, range_end: Optional[pulumi.Input[str]] = None, range_start: Optional[pulumi.Input[str]] = None): """ HttpRouteRuleMatch criteria for field values that must stay within the specified integer range. :param pulumi.Input[str] range_end: The end of the range (exclusive) in signed long integer format. :param pulumi.Input[str] range_start: The start of the range (inclusive) in signed long integer format. """ if range_end is not None: pulumi.set(__self__, "range_end", range_end) if range_start is not None: pulumi.set(__self__, "range_start", range_start) @property @pulumi.getter(name="rangeEnd") def range_end(self) -> Optional[pulumi.Input[str]]: """ The end of the range (exclusive) in signed long integer format. """ return pulumi.get(self, "range_end") @range_end.setter def range_end(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_end", value) @property @pulumi.getter(name="rangeStart") def range_start(self) -> Optional[pulumi.Input[str]]: """ The start of the range (inclusive) in signed long integer format. 
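Illustrative sketch: a match covering ports 1000-1999. Both bounds are passed as strings in signed long integer format, with range_start inclusive and range_end exclusive:

    port_range = Int64RangeMatchArgs(
        range_start="1000",  # inclusive lower bound
        range_end="2000",    # exclusive upper bound
    )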
""" return pulumi.get(self, "range_start") @range_start.setter def range_start(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_start", value) @pulumi.input_type class InterconnectAttachmentPartnerMetadataArgs: def __init__(__self__, *, interconnect_name: Optional[pulumi.Input[str]] = None, partner_name: Optional[pulumi.Input[str]] = None, portal_url: Optional[pulumi.Input[str]] = None): """ Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments. :param pulumi.Input[str] interconnect_name: Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. :param pulumi.Input[str] partner_name: Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. :param pulumi.Input[str] portal_url: URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. """ if interconnect_name is not None: pulumi.set(__self__, "interconnect_name", interconnect_name) if partner_name is not None: pulumi.set(__self__, "partner_name", partner_name) if portal_url is not None: pulumi.set(__self__, "portal_url", portal_url) @property @pulumi.getter(name="interconnectName") def interconnect_name(self) -> Optional[pulumi.Input[str]]: """ Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. """ return pulumi.get(self, "interconnect_name") @interconnect_name.setter def interconnect_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interconnect_name", value) @property @pulumi.getter(name="partnerName") def partner_name(self) -> Optional[pulumi.Input[str]]: """ Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. """ return pulumi.get(self, "partner_name") @partner_name.setter def partner_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "partner_name", value) @property @pulumi.getter(name="portalUrl") def portal_url(self) -> Optional[pulumi.Input[str]]: """ URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. """ return pulumi.get(self, "portal_url") @portal_url.setter def portal_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "portal_url", value) @pulumi.input_type class InterconnectMacsecPreSharedKeyArgs: def __init__(__self__, *, name: pulumi.Input[str], start_time: Optional[pulumi.Input[str]] = None): """ Describes a pre-shared key used to setup MACsec in static connectivity association key (CAK) mode. :param pulumi.Input[str] name: A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
:param pulumi.Input[str] start_time: An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart. """ pulumi.set(__self__, "name", name) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class InterconnectMacsecArgs: def __init__(__self__, *, pre_shared_keys: pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]], fail_open: Optional[pulumi.Input[bool]] = None): """ Configuration information for enabling Media Access Control security (Macsec) on this Interconnect between Google and your on-premises router. :param pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]] pre_shared_keys: A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. The Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link. :param pulumi.Input[bool] fail_open: If set to true, the Interconnect will be configured with a should-secure MACsec security policy that allows the Google router to fall back to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. """ pulumi.set(__self__, "pre_shared_keys", pre_shared_keys) if fail_open is not None: pulumi.set(__self__, "fail_open", fail_open) @property @pulumi.getter(name="preSharedKeys") def pre_shared_keys(self) -> pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]]: """ A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. The Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link.
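Illustrative sketch (names and timestamps are placeholders): a two-key keychain whose start times are in increasing order and at least 6 hours apart, as the startTime rules above require:

    pre_shared_keys=[
        InterconnectMacsecPreSharedKeyArgs(name="key-1", start_time="2023-07-01T00:00:00Z"),
        InterconnectMacsecPreSharedKeyArgs(name="key-2", start_time="2023-07-01T12:00:00Z"),
    ]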
""" return pulumi.get(self, "pre_shared_keys") @pre_shared_keys.setter def pre_shared_keys(self, value: pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]]): pulumi.set(self, "pre_shared_keys", value) @property @pulumi.getter(name="failOpen") def fail_open(self) -> Optional[pulumi.Input[bool]]: """ If set to true, the Interconnect will be configured with a should-secure MACsec security policy, that allows the Google router to fallback to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. """ return pulumi.get(self, "fail_open") @fail_open.setter def fail_open(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "fail_open", value) @pulumi.input_type class LicenseResourceCommitmentArgs: def __init__(__self__, *, amount: Optional[pulumi.Input[str]] = None, cores_per_license: Optional[pulumi.Input[str]] = None, license: Optional[pulumi.Input[str]] = None): """ Commitment for a particular license resource. :param pulumi.Input[str] amount: The number of licenses purchased. :param pulumi.Input[str] cores_per_license: Specifies the core range of the instance for which this license applies. :param pulumi.Input[str] license: Any applicable license URI. """ if amount is not None: pulumi.set(__self__, "amount", amount) if cores_per_license is not None: pulumi.set(__self__, "cores_per_license", cores_per_license) if license is not None: pulumi.set(__self__, "license", license) @property @pulumi.getter def amount(self) -> Optional[pulumi.Input[str]]: """ The number of licenses purchased. """ return pulumi.get(self, "amount") @amount.setter def amount(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "amount", value) @property @pulumi.getter(name="coresPerLicense") def cores_per_license(self) -> Optional[pulumi.Input[str]]: """ Specifies the core range of the instance for which this license applies. """ return pulumi.get(self, "cores_per_license") @cores_per_license.setter def cores_per_license(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cores_per_license", value) @property @pulumi.getter def license(self) -> Optional[pulumi.Input[str]]: """ Any applicable license URI. """ return pulumi.get(self, "license") @license.setter def license(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "license", value) @pulumi.input_type class LicenseResourceRequirementsArgs: def __init__(__self__, *, min_guest_cpu_count: Optional[pulumi.Input[int]] = None, min_memory_mb: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] min_guest_cpu_count: Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start. :param pulumi.Input[int] min_memory_mb: Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. """ if min_guest_cpu_count is not None: pulumi.set(__self__, "min_guest_cpu_count", min_guest_cpu_count) if min_memory_mb is not None: pulumi.set(__self__, "min_memory_mb", min_memory_mb) @property @pulumi.getter(name="minGuestCpuCount") def min_guest_cpu_count(self) -> Optional[pulumi.Input[int]]: """ Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start. 
""" return pulumi.get(self, "min_guest_cpu_count") @min_guest_cpu_count.setter def min_guest_cpu_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_guest_cpu_count", value) @property @pulumi.getter(name="minMemoryMb") def min_memory_mb(self) -> Optional[pulumi.Input[int]]: """ Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. """ return pulumi.get(self, "min_memory_mb") @min_memory_mb.setter def min_memory_mb(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_memory_mb", value) @pulumi.input_type class LocalDiskArgs: def __init__(__self__, *, disk_count: Optional[pulumi.Input[int]] = None, disk_size_gb: Optional[pulumi.Input[int]] = None, disk_type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] disk_count: Specifies the number of such disks. :param pulumi.Input[int] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input[str] disk_type: Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. """ if disk_count is not None: pulumi.set(__self__, "disk_count", disk_count) if disk_size_gb is not None: pulumi.set(__self__, "disk_size_gb", disk_size_gb) if disk_type is not None: pulumi.set(__self__, "disk_type", disk_type) @property @pulumi.getter(name="diskCount") def disk_count(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of such disks. """ return pulumi.get(self, "disk_count") @disk_count.setter def disk_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "disk_count", value) @property @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[int]]: """ Specifies the size of the disk in base-2 GB. """ return pulumi.get(self, "disk_size_gb") @disk_size_gb.setter def disk_size_gb(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "disk_size_gb", value) @property @pulumi.getter(name="diskType") def disk_type(self) -> Optional[pulumi.Input[str]]: """ Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. """ return pulumi.get(self, "disk_type") @disk_type.setter def disk_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "disk_type", value) @pulumi.input_type class LogConfigCloudAuditOptionsArgs: def __init__(__self__, *, authorization_logging_options: Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']] = None, log_name: Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsArgs'] authorization_logging_options: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsLogName'] log_name: This is deprecated and has no effect. Do not use. """ if authorization_logging_options is not None: pulumi.set(__self__, "authorization_logging_options", authorization_logging_options) if log_name is not None: pulumi.set(__self__, "log_name", log_name) @property @pulumi.getter(name="authorizationLoggingOptions") def authorization_logging_options(self) -> Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "authorization_logging_options") @authorization_logging_options.setter def authorization_logging_options(self, value: Optional[pulumi.Input['AuthorizationLoggingOptionsArgs']]): pulumi.set(self, "authorization_logging_options", value) @property @pulumi.getter(name="logName") def log_name(self) -> Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_name") @log_name.setter def log_name(self, value: Optional[pulumi.Input['LogConfigCloudAuditOptionsLogName']]): pulumi.set(self, "log_name", value) @pulumi.input_type class LogConfigCounterOptionsCustomFieldArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input[str] name: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] value: This is deprecated and has no effect. Do not use. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class LogConfigCounterOptionsArgs: def __init__(__self__, *, custom_fields: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]] = None, field: Optional[pulumi.Input[str]] = None, metric: Optional[pulumi.Input[str]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]] custom_fields: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] field: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] metric: This is deprecated and has no effect. Do not use. """ if custom_fields is not None: pulumi.set(__self__, "custom_fields", custom_fields) if field is not None: pulumi.set(__self__, "field", field) if metric is not None: pulumi.set(__self__, "metric", metric) @property @pulumi.getter(name="customFields") def custom_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "custom_fields") @custom_fields.setter def custom_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]]]): pulumi.set(self, "custom_fields", value) @property @pulumi.getter def field(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "field") @field.setter def field(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "field", value) @property @pulumi.getter def metric(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "metric") @metric.setter def metric(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "metric", value) @pulumi.input_type class LogConfigDataAccessOptionsArgs: def __init__(__self__, *, log_mode: Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsLogMode'] log_mode: This is deprecated and has no effect. Do not use. """ if log_mode is not None: pulumi.set(__self__, "log_mode", log_mode) @property @pulumi.getter(name="logMode") def log_mode(self) -> Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_mode") @log_mode.setter def log_mode(self, value: Optional[pulumi.Input['LogConfigDataAccessOptionsLogMode']]): pulumi.set(self, "log_mode", value) @pulumi.input_type class LogConfigArgs: def __init__(__self__, *, cloud_audit: Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']] = None, counter: Optional[pulumi.Input['LogConfigCounterOptionsArgs']] = None, data_access: Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsArgs'] cloud_audit: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCounterOptionsArgs'] counter: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsArgs'] data_access: This is deprecated and has no effect. Do not use. """ if cloud_audit is not None: pulumi.set(__self__, "cloud_audit", cloud_audit) if counter is not None: pulumi.set(__self__, "counter", counter) if data_access is not None: pulumi.set(__self__, "data_access", data_access) @property @pulumi.getter(name="cloudAudit") def cloud_audit(self) -> Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "cloud_audit") @cloud_audit.setter def cloud_audit(self, value: Optional[pulumi.Input['LogConfigCloudAuditOptionsArgs']]): pulumi.set(self, "cloud_audit", value) @property @pulumi.getter def counter(self) -> Optional[pulumi.Input['LogConfigCounterOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "counter") @counter.setter def counter(self, value: Optional[pulumi.Input['LogConfigCounterOptionsArgs']]): pulumi.set(self, "counter", value) @property @pulumi.getter(name="dataAccess") def data_access(self) -> Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "data_access") @data_access.setter def data_access(self, value: Optional[pulumi.Input['LogConfigDataAccessOptionsArgs']]): pulumi.set(self, "data_access", value) @pulumi.input_type class MetadataCredentialsFromPluginArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, struct_config: Optional[pulumi.Input[str]] = None): """ [Deprecated] Custom authenticator credentials. Custom authenticator credentials. :param pulumi.Input[str] name: Plugin name. :param pulumi.Input[str] struct_config: A text proto that conforms to a Struct type definition interpreted by the plugin. """ if name is not None: pulumi.set(__self__, "name", name) if struct_config is not None: pulumi.set(__self__, "struct_config", struct_config) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Plugin name. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="structConfig") def struct_config(self) -> Optional[pulumi.Input[str]]: """ A text proto that conforms to a Struct type definition interpreted by the plugin. """ return pulumi.get(self, "struct_config") @struct_config.setter def struct_config(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "struct_config", value) @pulumi.input_type class MetadataFilterLabelMatchArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer. :param pulumi.Input[str] name: Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. :param pulumi.Input[str] value: The value of the label must match the specified value. value can have a maximum length of 1024 characters. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ The value of the label must match the specified value. value can have a maximum length of 1024 characters. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class MetadataFilterArgs: def __init__(__self__, *, filter_labels: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]] = None, filter_match_criteria: Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']] = None): """ Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]] filter_labels: The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries. :param pulumi.Input['MetadataFilterFilterMatchCriteria'] filter_match_criteria: Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. 
Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. """ if filter_labels is not None: pulumi.set(__self__, "filter_labels", filter_labels) if filter_match_criteria is not None: pulumi.set(__self__, "filter_match_criteria", filter_match_criteria) @property @pulumi.getter(name="filterLabels") def filter_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]]: """ The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria. This list must not be empty and can have at the most 64 entries. """ return pulumi.get(self, "filter_labels") @filter_labels.setter def filter_labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]]]): pulumi.set(self, "filter_labels", value) @property @pulumi.getter(name="filterMatchCriteria") def filter_match_criteria(self) -> Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']]: """ Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. """ return pulumi.get(self, "filter_match_criteria") @filter_match_criteria.setter def filter_match_criteria(self, value: Optional[pulumi.Input['MetadataFilterFilterMatchCriteria']]): pulumi.set(self, "filter_match_criteria", value) @pulumi.input_type class MetadataItemsItemArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ Metadata :param pulumi.Input[str] key: Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. :param pulumi.Input[str] value: Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).
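Illustrative sketch ("startup-script" is a well-known predefined key; the script body is a placeholder): a single entry as it would be passed in the items list of MetadataArgs below:

    item = MetadataItemsItemArgs(
        key="startup-script",
        value="#!/bin/bash\necho booted",
    )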
""" return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class MetadataArgs: def __init__(__self__, *, items: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]] = None): """ A metadata key/value entry. :param pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]] items: Array of key/value pairs. The total size of all keys and values must be less than 512 KB. """ if items is not None: pulumi.set(__self__, "items", items) @property @pulumi.getter def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]]: """ Array of key/value pairs. The total size of all keys and values must be less than 512 KB. """ return pulumi.get(self, "items") @items.setter def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]]]): pulumi.set(self, "items", value) @pulumi.input_type class NamedPortArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, port: Optional[pulumi.Input[int]] = None): """ The named port. For example: <"http", 80>. :param pulumi.Input[str] name: The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. :param pulumi.Input[int] port: The port number, which can be a value between 1 and 65535. """ if name is not None: pulumi.set(__self__, "name", name) if port is not None: pulumi.set(__self__, "port", port) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The port number, which can be a value between 1 and 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @pulumi.input_type class NetworkEndpointGroupAppEngineArgs: def __init__(__self__, *, service: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None): """ Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". :param pulumi.Input[str] url_mask: A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. :param pulumi.Input[str] version: Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". 
""" if service is not None: pulumi.set(__self__, "service", service) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) if version is not None: pulumi.set(__self__, "version", version) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. """ return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @property @pulumi.getter def version(self) -> Optional[pulumi.Input[str]]: """ Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". """ return pulumi.get(self, "version") @version.setter def version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "version", value) @pulumi.input_type class NetworkEndpointGroupCloudFunctionArgs: def __init__(__self__, *, function: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None): """ Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] function: A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". :param pulumi.Input[str] url_mask: A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. """ if function is not None: pulumi.set(__self__, "function", function) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) @property @pulumi.getter def function(self) -> Optional[pulumi.Input[str]]: """ A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". """ return pulumi.get(self, "function") @function.setter def function(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "function", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. 
For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. """ return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @pulumi.input_type class NetworkEndpointGroupCloudRunArgs: def __init__(__self__, *, service: Optional[pulumi.Input[str]] = None, tag: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None): """ Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". :param pulumi.Input[str] tag: Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". :param pulumi.Input[str] url_mask: A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. """ if service is not None: pulumi.set(__self__, "service", service) if tag is not None: pulumi.set(__self__, "tag", tag) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter def tag(self) -> Optional[pulumi.Input[str]]: """ Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". """ return pulumi.get(self, "tag") @tag.setter def tag(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tag", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. 
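A minimal illustrative sketch, assuming this class is imported from this module; the mask string is an assumed placeholder-style template, while the explicit service/tag values are the example values quoted above:

    # One NEG routing to several Cloud Run services based on the request URL.
    cloud_run_neg = NetworkEndpointGroupCloudRunArgs(
        url_mask="<tag>.domain.com/<service>",
    )

    # Or pin the NEG to a single service and revision tag.
    cloud_run_neg_pinned = NetworkEndpointGroupCloudRunArgs(
        service="run-service",
        tag="revision-0010",
    )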
""" return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @pulumi.input_type class NetworkEndpointGroupServerlessDeploymentArgs: def __init__(__self__, *, platform: Optional[pulumi.Input[str]] = None, resource: Optional[pulumi.Input[str]] = None, url_mask: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None): """ Configuration for a serverless network endpoint group (NEG). The platform must be provided. Note: The target backend service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] platform: The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com :param pulumi.Input[str] resource: The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name :param pulumi.Input[str] url_mask: A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag :param pulumi.Input[str] version: The optional resource version. The version identified by this value is platform-specific and is follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag """ if platform is not None: pulumi.set(__self__, "platform", platform) if resource is not None: pulumi.set(__self__, "resource", resource) if url_mask is not None: pulumi.set(__self__, "url_mask", url_mask) if version is not None: pulumi.set(__self__, "version", version) @property @pulumi.getter def platform(self) -> Optional[pulumi.Input[str]]: """ The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com """ return pulumi.get(self, "platform") @platform.setter def platform(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "platform", value) @property @pulumi.getter def resource(self) -> Optional[pulumi.Input[str]]: """ The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name """ return pulumi.get(self, "resource") @resource.setter def resource(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource", value) @property @pulumi.getter(name="urlMask") def url_mask(self) -> Optional[pulumi.Input[str]]: """ A template to parse platform-specific fields from a request URL. 
URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag """ return pulumi.get(self, "url_mask") @url_mask.setter def url_mask(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url_mask", value) @property @pulumi.getter def version(self) -> Optional[pulumi.Input[str]]: """ The optional resource version. The version identified by this value is platform-specific and is as follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag """ return pulumi.get(self, "version") @version.setter def version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "version", value) @pulumi.input_type class NetworkInterfaceSubInterfaceArgs: def __init__(__self__, *, ip_address: Optional[pulumi.Input[str]] = None, ip_allocation_mode: Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']] = None, subnetwork: Optional[pulumi.Input[str]] = None, vlan: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] ip_address: An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP. :param pulumi.Input[str] subnetwork: If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork :param pulumi.Input[int] vlan: VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting. """ if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if ip_allocation_mode is not None: pulumi.set(__self__, "ip_allocation_mode", ip_allocation_mode) if subnetwork is not None: pulumi.set(__self__, "subnetwork", subnetwork) if vlan is not None: pulumi.set(__self__, "vlan", vlan) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @property @pulumi.getter(name="ipAllocationMode") def ip_allocation_mode(self) -> Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']]: return pulumi.get(self, "ip_allocation_mode") @ip_allocation_mode.setter def ip_allocation_mode(self, value: Optional[pulumi.Input['NetworkInterfaceSubInterfaceIpAllocationMode']]): pulumi.set(self, "ip_allocation_mode", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL.
For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork """ return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @property @pulumi.getter def vlan(self) -> Optional[pulumi.Input[int]]: """ VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting. """ return pulumi.get(self, "vlan") @vlan.setter def vlan(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vlan", value) @pulumi.input_type class NetworkInterfaceArgs: def __init__(__self__, *, access_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]] = None, alias_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]] = None, internal_ipv6_prefix_length: Optional[pulumi.Input[int]] = None, ipv6_access_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]] = None, ipv6_address: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, network_ip: Optional[pulumi.Input[str]] = None, nic_type: Optional[pulumi.Input['NetworkInterfaceNicType']] = None, queue_count: Optional[pulumi.Input[int]] = None, stack_type: Optional[pulumi.Input['NetworkInterfaceStackType']] = None, subinterfaces: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]] = None, subnetwork: Optional[pulumi.Input[str]] = None): """ A network interface resource attached to an instance. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] access_configs: An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. :param pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]] alias_ip_ranges: An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. :param pulumi.Input[int] internal_ipv6_prefix_length: The prefix length of the primary internal IPv6 range. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] ipv6_access_configs: An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. :param pulumi.Input[str] ipv6_address: An IPv6 internal network address for this network interface. :param pulumi.Input[str] network: URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default :param pulumi.Input[str] network_ip: An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. 
:param pulumi.Input['NetworkInterfaceNicType'] nic_type: The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. :param pulumi.Input[int] queue_count: The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. :param pulumi.Input['NetworkInterfaceStackType'] stack_type: The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]] subinterfaces: SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. :param pulumi.Input[str] subnetwork: The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork """ if access_configs is not None: pulumi.set(__self__, "access_configs", access_configs) if alias_ip_ranges is not None: pulumi.set(__self__, "alias_ip_ranges", alias_ip_ranges) if internal_ipv6_prefix_length is not None: pulumi.set(__self__, "internal_ipv6_prefix_length", internal_ipv6_prefix_length) if ipv6_access_configs is not None: pulumi.set(__self__, "ipv6_access_configs", ipv6_access_configs) if ipv6_address is not None: pulumi.set(__self__, "ipv6_address", ipv6_address) if network is not None: pulumi.set(__self__, "network", network) if network_ip is not None: pulumi.set(__self__, "network_ip", network_ip) if nic_type is not None: pulumi.set(__self__, "nic_type", nic_type) if queue_count is not None: pulumi.set(__self__, "queue_count", queue_count) if stack_type is not None: pulumi.set(__self__, "stack_type", stack_type) if subinterfaces is not None: pulumi.set(__self__, "subinterfaces", subinterfaces) if subnetwork is not None: pulumi.set(__self__, "subnetwork", subnetwork) @property @pulumi.getter(name="accessConfigs") def access_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]: """ An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. """ return pulumi.get(self, "access_configs") @access_configs.setter def access_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]): pulumi.set(self, "access_configs", value) @property @pulumi.getter(name="aliasIpRanges") def alias_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]]: """ An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. 
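A minimal illustrative sketch, assuming AliasIpRangeArgs (defined elsewhere in this module) exposes an ip_cidr_range field; the subnetwork path and CIDR are placeholders in the partial-URL form shown below:

    nic = NetworkInterfaceArgs(
        subnetwork="regions/region/subnetworks/subnetwork",
        alias_ip_ranges=[
            AliasIpRangeArgs(ip_cidr_range="10.128.1.0/24"),
        ],
    )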
""" return pulumi.get(self, "alias_ip_ranges") @alias_ip_ranges.setter def alias_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]]]): pulumi.set(self, "alias_ip_ranges", value) @property @pulumi.getter(name="internalIpv6PrefixLength") def internal_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]: """ The prefix length of the primary internal IPv6 range. """ return pulumi.get(self, "internal_ipv6_prefix_length") @internal_ipv6_prefix_length.setter def internal_ipv6_prefix_length(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "internal_ipv6_prefix_length", value) @property @pulumi.getter(name="ipv6AccessConfigs") def ipv6_access_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]: """ An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. """ return pulumi.get(self, "ipv6_access_configs") @ipv6_access_configs.setter def ipv6_access_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]]]): pulumi.set(self, "ipv6_access_configs", value) @property @pulumi.getter(name="ipv6Address") def ipv6_address(self) -> Optional[pulumi.Input[str]]: """ An IPv6 internal network address for this network interface. """ return pulumi.get(self, "ipv6_address") @ipv6_address.setter def ipv6_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ipv6_address", value) @property @pulumi.getter def network(self) -> Optional[pulumi.Input[str]]: """ URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default """ return pulumi.get(self, "network") @network.setter def network(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network", value) @property @pulumi.getter(name="networkIP") def network_ip(self) -> Optional[pulumi.Input[str]]: """ An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. """ return pulumi.get(self, "network_ip") @network_ip.setter def network_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_ip", value) @property @pulumi.getter(name="nicType") def nic_type(self) -> Optional[pulumi.Input['NetworkInterfaceNicType']]: """ The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. """ return pulumi.get(self, "nic_type") @nic_type.setter def nic_type(self, value: Optional[pulumi.Input['NetworkInterfaceNicType']]): pulumi.set(self, "nic_type", value) @property @pulumi.getter(name="queueCount") def queue_count(self) -> Optional[pulumi.Input[int]]: """ The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. 
""" return pulumi.get(self, "queue_count") @queue_count.setter def queue_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "queue_count", value) @property @pulumi.getter(name="stackType") def stack_type(self) -> Optional[pulumi.Input['NetworkInterfaceStackType']]: """ The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations. """ return pulumi.get(self, "stack_type") @stack_type.setter def stack_type(self, value: Optional[pulumi.Input['NetworkInterfaceStackType']]): pulumi.set(self, "stack_type", value) @property @pulumi.getter def subinterfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]]: """ SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. """ return pulumi.get(self, "subinterfaces") @subinterfaces.setter def subinterfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]]]): pulumi.set(self, "subinterfaces", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. 
For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork """ return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @pulumi.input_type class NetworkPerformanceConfigArgs: def __init__(__self__, *, external_ip_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']] = None, total_egress_bandwidth_tier: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']] = None): if external_ip_egress_bandwidth_tier is not None: pulumi.set(__self__, "external_ip_egress_bandwidth_tier", external_ip_egress_bandwidth_tier) if total_egress_bandwidth_tier is not None: pulumi.set(__self__, "total_egress_bandwidth_tier", total_egress_bandwidth_tier) @property @pulumi.getter(name="externalIpEgressBandwidthTier") def external_ip_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]: return pulumi.get(self, "external_ip_egress_bandwidth_tier") @external_ip_egress_bandwidth_tier.setter def external_ip_egress_bandwidth_tier(self, value: Optional[pulumi.Input['NetworkPerformanceConfigExternalIpEgressBandwidthTier']]): pulumi.set(self, "external_ip_egress_bandwidth_tier", value) @property @pulumi.getter(name="totalEgressBandwidthTier") def total_egress_bandwidth_tier(self) -> Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]: return pulumi.get(self, "total_egress_bandwidth_tier") @total_egress_bandwidth_tier.setter def total_egress_bandwidth_tier(self, value: Optional[pulumi.Input['NetworkPerformanceConfigTotalEgressBandwidthTier']]): pulumi.set(self, "total_egress_bandwidth_tier", value) @pulumi.input_type class NetworkRoutingConfigArgs: def __init__(__self__, *, routing_mode: Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']] = None): """ A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide. :param pulumi.Input['NetworkRoutingConfigRoutingMode'] routing_mode: The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions. """ if routing_mode is not None: pulumi.set(__self__, "routing_mode", routing_mode) @property @pulumi.getter(name="routingMode") def routing_mode(self) -> Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']]: """ The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions. 
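A minimal illustrative sketch, assuming the REGIONAL and GLOBAL modes described above are exposed as members of the NetworkRoutingConfigRoutingMode enum referenced in the signature:

    routing_config = NetworkRoutingConfigArgs(
        routing_mode=NetworkRoutingConfigRoutingMode.GLOBAL,
    )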
""" return pulumi.get(self, "routing_mode") @routing_mode.setter def routing_mode(self, value: Optional[pulumi.Input['NetworkRoutingConfigRoutingMode']]): pulumi.set(self, "routing_mode", value) @pulumi.input_type class NodeGroupAutoscalingPolicyArgs: def __init__(__self__, *, max_nodes: Optional[pulumi.Input[int]] = None, min_nodes: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']] = None): """ :param pulumi.Input[int] max_nodes: The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. :param pulumi.Input[int] min_nodes: The minimum number of nodes that the group should have. :param pulumi.Input['NodeGroupAutoscalingPolicyMode'] mode: The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. """ if max_nodes is not None: pulumi.set(__self__, "max_nodes", max_nodes) if min_nodes is not None: pulumi.set(__self__, "min_nodes", min_nodes) if mode is not None: pulumi.set(__self__, "mode", mode) @property @pulumi.getter(name="maxNodes") def max_nodes(self) -> Optional[pulumi.Input[int]]: """ The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. """ return pulumi.get(self, "max_nodes") @max_nodes.setter def max_nodes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_nodes", value) @property @pulumi.getter(name="minNodes") def min_nodes(self) -> Optional[pulumi.Input[int]]: """ The minimum number of nodes that the group should have. """ return pulumi.get(self, "min_nodes") @min_nodes.setter def min_nodes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_nodes", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']]: """ The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['NodeGroupAutoscalingPolicyMode']]): pulumi.set(self, "mode", value) @pulumi.input_type class NodeGroupMaintenanceWindowArgs: def __init__(__self__, *, start_time: Optional[pulumi.Input[str]] = None): """ Time window specified for daily maintenance operations. GCE's internal maintenance will be performed within this window. :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. """ if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. 
""" return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class NodeTemplateNodeTypeFlexibilityArgs: def __init__(__self__, *, cpus: Optional[pulumi.Input[str]] = None, local_ssd: Optional[pulumi.Input[str]] = None, memory: Optional[pulumi.Input[str]] = None): if cpus is not None: pulumi.set(__self__, "cpus", cpus) if local_ssd is not None: pulumi.set(__self__, "local_ssd", local_ssd) if memory is not None: pulumi.set(__self__, "memory", memory) @property @pulumi.getter def cpus(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "cpus") @cpus.setter def cpus(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpus", value) @property @pulumi.getter(name="localSsd") def local_ssd(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "local_ssd") @local_ssd.setter def local_ssd(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "local_ssd", value) @property @pulumi.getter def memory(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "memory") @memory.setter def memory(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "memory", value) @pulumi.input_type class NotificationEndpointGrpcSettingsArgs: def __init__(__self__, *, authority: Optional[pulumi.Input[str]] = None, endpoint: Optional[pulumi.Input[str]] = None, payload_name: Optional[pulumi.Input[str]] = None, resend_interval: Optional[pulumi.Input['DurationArgs']] = None, retry_duration_sec: Optional[pulumi.Input[int]] = None): """ Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint. :param pulumi.Input[str] authority: Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 :param pulumi.Input[str] endpoint: Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. :param pulumi.Input[str] payload_name: Optional. If specified, this field is used to populate the "name" field in gRPC requests. :param pulumi.Input['DurationArgs'] resend_interval: Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. :param pulumi.Input[int] retry_duration_sec: How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. """ if authority is not None: pulumi.set(__self__, "authority", authority) if endpoint is not None: pulumi.set(__self__, "endpoint", endpoint) if payload_name is not None: pulumi.set(__self__, "payload_name", payload_name) if resend_interval is not None: pulumi.set(__self__, "resend_interval", resend_interval) if retry_duration_sec is not None: pulumi.set(__self__, "retry_duration_sec", retry_duration_sec) @property @pulumi.getter def authority(self) -> Optional[pulumi.Input[str]]: """ Optional. If specified, this field is used to set the authority header by the sender of notifications. 
See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 """ return pulumi.get(self, "authority") @authority.setter def authority(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "authority", value) @property @pulumi.getter def endpoint(self) -> Optional[pulumi.Input[str]]: """ Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. """ return pulumi.get(self, "endpoint") @endpoint.setter def endpoint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "endpoint", value) @property @pulumi.getter(name="payloadName") def payload_name(self) -> Optional[pulumi.Input[str]]: """ Optional. If specified, this field is used to populate the "name" field in gRPC requests. """ return pulumi.get(self, "payload_name") @payload_name.setter def payload_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "payload_name", value) @property @pulumi.getter(name="resendInterval") def resend_interval(self) -> Optional[pulumi.Input['DurationArgs']]: """ Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. """ return pulumi.get(self, "resend_interval") @resend_interval.setter def resend_interval(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "resend_interval", value) @property @pulumi.getter(name="retryDurationSec") def retry_duration_sec(self) -> Optional[pulumi.Input[int]]: """ How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. """ return pulumi.get(self, "retry_duration_sec") @retry_duration_sec.setter def retry_duration_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "retry_duration_sec", value) @pulumi.input_type class OutlierDetectionArgs: def __init__(__self__, *, base_ejection_time: Optional[pulumi.Input['DurationArgs']] = None, consecutive_errors: Optional[pulumi.Input[int]] = None, consecutive_gateway_failure: Optional[pulumi.Input[int]] = None, enforcing_consecutive_errors: Optional[pulumi.Input[int]] = None, enforcing_consecutive_gateway_failure: Optional[pulumi.Input[int]] = None, enforcing_success_rate: Optional[pulumi.Input[int]] = None, interval: Optional[pulumi.Input['DurationArgs']] = None, max_ejection_percent: Optional[pulumi.Input[int]] = None, success_rate_minimum_hosts: Optional[pulumi.Input[int]] = None, success_rate_request_volume: Optional[pulumi.Input[int]] = None, success_rate_stdev_factor: Optional[pulumi.Input[int]] = None): """ Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. :param pulumi.Input['DurationArgs'] base_ejection_time: The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. :param pulumi.Input[int] consecutive_errors: Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. :param pulumi.Input[int] consecutive_gateway_failure: The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. 
:param pulumi.Input[int] enforcing_consecutive_errors: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. :param pulumi.Input[int] enforcing_consecutive_gateway_failure: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input[int] enforcing_success_rate: The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input['DurationArgs'] interval: Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. :param pulumi.Input[int] max_ejection_percent: Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. :param pulumi.Input[int] success_rate_minimum_hosts: The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. :param pulumi.Input[int] success_rate_request_volume: The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. :param pulumi.Input[int] success_rate_stdev_factor: This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. 
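A minimal illustrative sketch that simply restates the defaults described above, assuming DurationArgs (referenced in the signature) accepts a seconds field:

    outlier_detection = OutlierDetectionArgs(
        consecutive_errors=5,
        interval=DurationArgs(seconds="1"),
        base_ejection_time=DurationArgs(seconds="30"),
        max_ejection_percent=50,
    )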
""" if base_ejection_time is not None: pulumi.set(__self__, "base_ejection_time", base_ejection_time) if consecutive_errors is not None: pulumi.set(__self__, "consecutive_errors", consecutive_errors) if consecutive_gateway_failure is not None: pulumi.set(__self__, "consecutive_gateway_failure", consecutive_gateway_failure) if enforcing_consecutive_errors is not None: pulumi.set(__self__, "enforcing_consecutive_errors", enforcing_consecutive_errors) if enforcing_consecutive_gateway_failure is not None: pulumi.set(__self__, "enforcing_consecutive_gateway_failure", enforcing_consecutive_gateway_failure) if enforcing_success_rate is not None: pulumi.set(__self__, "enforcing_success_rate", enforcing_success_rate) if interval is not None: pulumi.set(__self__, "interval", interval) if max_ejection_percent is not None: pulumi.set(__self__, "max_ejection_percent", max_ejection_percent) if success_rate_minimum_hosts is not None: pulumi.set(__self__, "success_rate_minimum_hosts", success_rate_minimum_hosts) if success_rate_request_volume is not None: pulumi.set(__self__, "success_rate_request_volume", success_rate_request_volume) if success_rate_stdev_factor is not None: pulumi.set(__self__, "success_rate_stdev_factor", success_rate_stdev_factor) @property @pulumi.getter(name="baseEjectionTime") def base_ejection_time(self) -> Optional[pulumi.Input['DurationArgs']]: """ The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. """ return pulumi.get(self, "base_ejection_time") @base_ejection_time.setter def base_ejection_time(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "base_ejection_time", value) @property @pulumi.getter(name="consecutiveErrors") def consecutive_errors(self) -> Optional[pulumi.Input[int]]: """ Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. """ return pulumi.get(self, "consecutive_errors") @consecutive_errors.setter def consecutive_errors(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "consecutive_errors", value) @property @pulumi.getter(name="consecutiveGatewayFailure") def consecutive_gateway_failure(self) -> Optional[pulumi.Input[int]]: """ The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. """ return pulumi.get(self, "consecutive_gateway_failure") @consecutive_gateway_failure.setter def consecutive_gateway_failure(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "consecutive_gateway_failure", value) @property @pulumi.getter(name="enforcingConsecutiveErrors") def enforcing_consecutive_errors(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. 
""" return pulumi.get(self, "enforcing_consecutive_errors") @enforcing_consecutive_errors.setter def enforcing_consecutive_errors(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_consecutive_errors", value) @property @pulumi.getter(name="enforcingConsecutiveGatewayFailure") def enforcing_consecutive_gateway_failure(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. """ return pulumi.get(self, "enforcing_consecutive_gateway_failure") @enforcing_consecutive_gateway_failure.setter def enforcing_consecutive_gateway_failure(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_consecutive_gateway_failure", value) @property @pulumi.getter(name="enforcingSuccessRate") def enforcing_success_rate(self) -> Optional[pulumi.Input[int]]: """ The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. """ return pulumi.get(self, "enforcing_success_rate") @enforcing_success_rate.setter def enforcing_success_rate(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "enforcing_success_rate", value) @property @pulumi.getter def interval(self) -> Optional[pulumi.Input['DurationArgs']]: """ Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. """ return pulumi.get(self, "interval") @interval.setter def interval(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "interval", value) @property @pulumi.getter(name="maxEjectionPercent") def max_ejection_percent(self) -> Optional[pulumi.Input[int]]: """ Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. """ return pulumi.get(self, "max_ejection_percent") @max_ejection_percent.setter def max_ejection_percent(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ejection_percent", value) @property @pulumi.getter(name="successRateMinimumHosts") def success_rate_minimum_hosts(self) -> Optional[pulumi.Input[int]]: """ The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. """ return pulumi.get(self, "success_rate_minimum_hosts") @success_rate_minimum_hosts.setter def success_rate_minimum_hosts(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_minimum_hosts", value) @property @pulumi.getter(name="successRateRequestVolume") def success_rate_request_volume(self) -> Optional[pulumi.Input[int]]: """ The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. 
""" return pulumi.get(self, "success_rate_request_volume") @success_rate_request_volume.setter def success_rate_request_volume(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_request_volume", value) @property @pulumi.getter(name="successRateStdevFactor") def success_rate_stdev_factor(self) -> Optional[pulumi.Input[int]]: """ This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. """ return pulumi.get(self, "success_rate_stdev_factor") @success_rate_stdev_factor.setter def success_rate_stdev_factor(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "success_rate_stdev_factor", value) @pulumi.input_type class PacketMirroringFilterArgs: def __init__(__self__, *, cidr_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, direction: Optional[pulumi.Input['PacketMirroringFilterDirection']] = None, ip_protocols: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_ranges: IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. :param pulumi.Input['PacketMirroringFilterDirection'] direction: Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_protocols: Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ if cidr_ranges is not None: pulumi.set(__self__, "cidr_ranges", cidr_ranges) if direction is not None: pulumi.set(__self__, "direction", direction) if ip_protocols is not None: pulumi.set(__self__, "ip_protocols", ip_protocols) @property @pulumi.getter(name="cidrRanges") def cidr_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ return pulumi.get(self, "cidr_ranges") @cidr_ranges.setter def cidr_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "cidr_ranges", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input['PacketMirroringFilterDirection']]: """ Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input['PacketMirroringFilterDirection']]): pulumi.set(self, "direction", value) @property @pulumi.getter(name="ipProtocols") def ip_protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Protocols that apply as filter on mirrored traffic. 
If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. """ return pulumi.get(self, "ip_protocols") @ip_protocols.setter def ip_protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ip_protocols", value) @pulumi.input_type class PacketMirroringForwardingRuleInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoInstanceInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the virtual machine instance which is being mirrored. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the virtual machine instance which is being mirrored. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoSubnetInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PacketMirroringMirroredResourceInfoArgs: def __init__(__self__, *, instances: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]] = None, subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]] instances: A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]] subnetworks: A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. 
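A minimal illustrative sketch using the instance and subnet info types defined above; the resource URLs are placeholders for resources in the same region as this packetMirroring:

    mirrored = PacketMirroringMirroredResourceInfoArgs(
        instances=[
            PacketMirroringMirroredResourceInfoInstanceInfoArgs(
                url="zones/us-central1-a/instances/mirrored-vm",
            ),
        ],
        subnetworks=[
            PacketMirroringMirroredResourceInfoSubnetInfoArgs(
                url="regions/us-central1/subnetworks/mirrored-subnet",
            ),
        ],
        tags=["mirror-me"],
    )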
""" if instances is not None: pulumi.set(__self__, "instances", instances) if subnetworks is not None: pulumi.set(__self__, "subnetworks", subnetworks) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]]: """ A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. """ return pulumi.get(self, "instances") @instances.setter def instances(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]]]): pulumi.set(self, "instances", value) @property @pulumi.getter def subnetworks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]]: """ A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. """ return pulumi.get(self, "subnetworks") @subnetworks.setter def subnetworks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]]]): pulumi.set(self, "subnetworks", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @pulumi.input_type class PacketMirroringNetworkInfoArgs: def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] url: URL of the network resource. """ if url is not None: pulumi.set(__self__, "url", url) @property @pulumi.getter def url(self) -> Optional[pulumi.Input[str]]: """ URL of the network resource. """ return pulumi.get(self, "url") @url.setter def url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "url", value) @pulumi.input_type class PathMatcherArgs: def __init__(__self__, *, default_route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, default_service: Optional[pulumi.Input[str]] = None, default_url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None, description: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, name: Optional[pulumi.Input[str]] = None, path_rules: Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]] = None, route_rules: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]] = None): """ A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service is used. :param pulumi.Input['HttpRouteActionArgs'] default_route_action: defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction. :param pulumi.Input[str] default_service: The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use :param pulumi.Input['HttpRedirectActionArgs'] default_url_redirect: When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backend service. HeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[str] name: The name to which this PathMatcher is referred by the HostRule. :param pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]] path_rules: The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set. :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]] route_rules: The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules. 
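A minimal illustrative sketch, assuming PathRuleArgs (defined later in this module) is used for the path rules; the backend service URLs are placeholders in the partial-URL form shown above, and only pathRules is set, per the rule above:

    path_matcher = PathMatcherArgs(
        name="api-matcher",
        default_service="global/backendServices/default-backend",
        path_rules=[
            PathRuleArgs(
                paths=["/api/*"],
                service="global/backendServices/api-backend",
            ),
        ],
    )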
""" if default_route_action is not None: pulumi.set(__self__, "default_route_action", default_route_action) if default_service is not None: pulumi.set(__self__, "default_service", default_service) if default_url_redirect is not None: pulumi.set(__self__, "default_url_redirect", default_url_redirect) if description is not None: pulumi.set(__self__, "description", description) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if name is not None: pulumi.set(__self__, "name", name) if path_rules is not None: pulumi.set(__self__, "path_rules", path_rules) if route_rules is not None: pulumi.set(__self__, "route_rules", route_rules) @property @pulumi.getter(name="defaultRouteAction") def default_route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction. """ return pulumi.get(self, "default_route_action") @default_route_action.setter def default_route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "default_route_action", value) @property @pulumi.getter(name="defaultService") def default_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use """ return pulumi.get(self, "default_service") @default_service.setter def default_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_service", value) @property @pulumi.getter(name="defaultUrlRedirect") def default_url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. 
""" return pulumi.get(self, "default_url_redirect") @default_url_redirect.setter def default_url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "default_url_redirect", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backend service. HeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name to which this PathMatcher is referred by the HostRule. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="pathRules") def path_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]]: """ The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set. """ return pulumi.get(self, "path_rules") @path_rules.setter def path_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]]]): pulumi.set(self, "path_rules", value) @property @pulumi.getter(name="routeRules") def route_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]]: """ The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules. """ return pulumi.get(self, "route_rules") @route_rules.setter def route_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]]]): pulumi.set(self, "route_rules", value) @pulumi.input_type class PathRuleArgs: def __init__(__self__, *, paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, route_action: Optional[pulumi.Input['HttpRouteActionArgs']] = None, service: Optional[pulumi.Input[str]] = None, url_redirect: Optional[pulumi.Input['HttpRedirectActionArgs']] = None): """ A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL. :param pulumi.Input[Sequence[pulumi.Input[str]]] paths: The list of path patterns to match. 
Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ if paths is not None: pulumi.set(__self__, "paths", paths) if route_action is not None: pulumi.set(__self__, "route_action", route_action) if service is not None: pulumi.set(__self__, "service", service) if url_redirect is not None: pulumi.set(__self__, "url_redirect", url_redirect) @property @pulumi.getter def paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. """ return pulumi.get(self, "paths") @paths.setter def paths(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "paths", value) @property @pulumi.getter(name="routeAction") def route_action(self) -> Optional[pulumi.Input['HttpRouteActionArgs']]: """ In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. """ return pulumi.get(self, "route_action") @route_action.setter def route_action(self, value: Optional[pulumi.Input['HttpRouteActionArgs']]): pulumi.set(self, "route_action", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. 
If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @property @pulumi.getter(name="urlRedirect") def url_redirect(self) -> Optional[pulumi.Input['HttpRedirectActionArgs']]: """ When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. """ return pulumi.get(self, "url_redirect") @url_redirect.setter def url_redirect(self, value: Optional[pulumi.Input['HttpRedirectActionArgs']]): pulumi.set(self, "url_redirect", value) @pulumi.input_type class PublicDelegatedPrefixPublicDelegatedSubPrefixArgs: def __init__(__self__, *, delegatee_project: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, ip_cidr_range: Optional[pulumi.Input[str]] = None, is_address: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None): """ Represents a sub PublicDelegatedPrefix. :param pulumi.Input[str] delegatee_project: Name of the project scoping this PublicDelegatedSubPrefix. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] ip_cidr_range: The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. :param pulumi.Input[bool] is_address: Whether the sub prefix is delegated to create Address resources in the delegatee project. :param pulumi.Input[str] name: The name of the sub public delegated prefix. """ if delegatee_project is not None: pulumi.set(__self__, "delegatee_project", delegatee_project) if description is not None: pulumi.set(__self__, "description", description) if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if is_address is not None: pulumi.set(__self__, "is_address", is_address) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="delegateeProject") def delegatee_project(self) -> Optional[pulumi.Input[str]]: """ Name of the project scoping this PublicDelegatedSubPrefix. """ return pulumi.get(self, "delegatee_project") @delegatee_project.setter def delegatee_project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "delegatee_project", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. 
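For example (an illustrative sketch; the names, CIDR, and project below are placeholders), a sub prefix delegated to another project:

    PublicDelegatedPrefixPublicDelegatedSubPrefixArgs(
        name="sub-prefix-1",
        ip_cidr_range="203.0.113.0/28",
        delegatee_project="other-project",
        is_address=False,
    )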
""" return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="isAddress") def is_address(self) -> Optional[pulumi.Input[bool]]: """ Whether the sub prefix is delegated to create Address resources in the delegatee project. """ return pulumi.get(self, "is_address") @is_address.setter def is_address(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the sub public delegated prefix. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class RequestMirrorPolicyArgs: def __init__(__self__, *, backend_service: Optional[pulumi.Input[str]] = None): """ A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer doesn't wait for responses from the shadow service. Before sending traffic to the shadow service, the host or authority header is suffixed with -shadow. :param pulumi.Input[str] backend_service: The full or partial URL to the BackendService resource being mirrored to. """ if backend_service is not None: pulumi.set(__self__, "backend_service", backend_service) @property @pulumi.getter(name="backendService") def backend_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the BackendService resource being mirrored to. """ return pulumi.get(self, "backend_service") @backend_service.setter def backend_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "backend_service", value) @pulumi.input_type class ReservationAffinityArgs: def __init__(__self__, *, consume_reservation_type: Optional[pulumi.Input['ReservationAffinityConsumeReservationType']] = None, key: Optional[pulumi.Input[str]] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Specifies the reservations that this instance can consume from. :param pulumi.Input['ReservationAffinityConsumeReservationType'] consume_reservation_type: Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. :param pulumi.Input[str] key: Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of a reservation resource. This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. """ if consume_reservation_type is not None: pulumi.set(__self__, "consume_reservation_type", consume_reservation_type) if key is not None: pulumi.set(__self__, "key", key) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter(name="consumeReservationType") def consume_reservation_type(self) -> Optional[pulumi.Input['ReservationAffinityConsumeReservationType']]: """ Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. 
See Consuming reserved instances for examples. """ return pulumi.get(self, "consume_reservation_type") @consume_reservation_type.setter def consume_reservation_type(self, value: Optional[pulumi.Input['ReservationAffinityConsumeReservationType']]): pulumi.set(self, "consume_reservation_type", value) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Corresponds to the label values of a reservation resource. This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. """ return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class ReservationArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, share_settings: Optional[pulumi.Input['ShareSettingsArgs']] = None, specific_reservation: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']] = None, specific_reservation_required: Optional[pulumi.Input[bool]] = None, zone: Optional[pulumi.Input[str]] = None): """ Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input['ShareSettingsArgs'] share_settings: Share-settings for shared-reservation :param pulumi.Input['AllocationSpecificSKUReservationArgs'] specific_reservation: Reservation for instances with specific machine shapes. :param pulumi.Input[bool] specific_reservation_required: Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. :param pulumi.Input[str] zone: Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. 
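Example (an illustrative sketch; the name and zone are placeholders, assuming the input types defined in this module are in scope):

    ReservationArgs(
        name="reserved-capacity",
        zone="us-central1-a",
        specific_reservation_required=True,
    )

With specific_reservation_required=True, only VMs that target this reservation by name can consume from it.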
""" if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if share_settings is not None: pulumi.set(__self__, "share_settings", share_settings) if specific_reservation is not None: pulumi.set(__self__, "specific_reservation", specific_reservation) if specific_reservation_required is not None: pulumi.set(__self__, "specific_reservation_required", specific_reservation_required) if zone is not None: pulumi.set(__self__, "zone", zone) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="shareSettings") def share_settings(self) -> Optional[pulumi.Input['ShareSettingsArgs']]: """ Share-settings for shared-reservation """ return pulumi.get(self, "share_settings") @share_settings.setter def share_settings(self, value: Optional[pulumi.Input['ShareSettingsArgs']]): pulumi.set(self, "share_settings", value) @property @pulumi.getter(name="specificReservation") def specific_reservation(self) -> Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]: """ Reservation for instances with specific machine shapes. """ return pulumi.get(self, "specific_reservation") @specific_reservation.setter def specific_reservation(self, value: Optional[pulumi.Input['AllocationSpecificSKUReservationArgs']]): pulumi.set(self, "specific_reservation", value) @property @pulumi.getter(name="specificReservationRequired") def specific_reservation_required(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. """ return pulumi.get(self, "specific_reservation_required") @specific_reservation_required.setter def specific_reservation_required(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "specific_reservation_required", value) @property @pulumi.getter def zone(self) -> Optional[pulumi.Input[str]]: """ Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. """ return pulumi.get(self, "zone") @zone.setter def zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "zone", value) @pulumi.input_type class ResourceCommitmentArgs: def __init__(__self__, *, accelerator_type: Optional[pulumi.Input[str]] = None, amount: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input['ResourceCommitmentType']] = None): """ Commitment for a particular resource (a Commitment is composed of one or more of these). 
:param pulumi.Input[str] accelerator_type: Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. :param pulumi.Input[str] amount: The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU. :param pulumi.Input['ResourceCommitmentType'] type: Type of resource for which this commitment applies. Possible values are VCPU and MEMORY """ if accelerator_type is not None: pulumi.set(__self__, "accelerator_type", accelerator_type) if amount is not None: pulumi.set(__self__, "amount", amount) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="acceleratorType") def accelerator_type(self) -> Optional[pulumi.Input[str]]: """ Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. """ return pulumi.get(self, "accelerator_type") @accelerator_type.setter def accelerator_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "accelerator_type", value) @property @pulumi.getter def amount(self) -> Optional[pulumi.Input[str]]: """ The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU. """ return pulumi.get(self, "amount") @amount.setter def amount(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "amount", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['ResourceCommitmentType']]: """ Type of resource for which this commitment applies. Possible values are VCPU and MEMORY """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['ResourceCommitmentType']]): pulumi.set(self, "type", value) @pulumi.input_type class ResourcePolicyDailyCycleArgs: def __init__(__self__, *, days_in_cycle: Optional[pulumi.Input[int]] = None, start_time: Optional[pulumi.Input[str]] = None): """ Time window specified for daily operations. :param pulumi.Input[int] days_in_cycle: Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. """ if days_in_cycle is not None: pulumi.set(__self__, "days_in_cycle", days_in_cycle) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="daysInCycle") def days_in_cycle(self) -> Optional[pulumi.Input[int]]: """ Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. """ return pulumi.get(self, "days_in_cycle") @days_in_cycle.setter def days_in_cycle(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "days_in_cycle", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
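For example (an illustrative sketch; the values are placeholders), a cycle that recurs every day at 04:00 UTC:

    ResourcePolicyDailyCycleArgs(days_in_cycle=1, start_time="04:00")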
""" return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyGroupPlacementPolicyArgs: def __init__(__self__, *, availability_domain_count: Optional[pulumi.Input[int]] = None, collocation: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']] = None, locality: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']] = None, scope: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']] = None, style: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']] = None, vm_count: Optional[pulumi.Input[int]] = None): """ A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality :param pulumi.Input[int] availability_domain_count: The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network :param pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation'] collocation: Specifies network collocation :param pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality'] locality: Specifies network locality :param pulumi.Input['ResourcePolicyGroupPlacementPolicyScope'] scope: Scope specifies the availability domain to which the VMs should be spread. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle'] style: Specifies instances to hosts placement relationship :param pulumi.Input[int] vm_count: Number of vms in this placement group """ if availability_domain_count is not None: pulumi.set(__self__, "availability_domain_count", availability_domain_count) if collocation is not None: pulumi.set(__self__, "collocation", collocation) if locality is not None: pulumi.set(__self__, "locality", locality) if scope is not None: pulumi.set(__self__, "scope", scope) if style is not None: pulumi.set(__self__, "style", style) if vm_count is not None: pulumi.set(__self__, "vm_count", vm_count) @property @pulumi.getter(name="availabilityDomainCount") def availability_domain_count(self) -> Optional[pulumi.Input[int]]: """ The number of availability domains instances will be spread across. If two instances are in different availability domain, they will not be put in the same low latency network """ return pulumi.get(self, "availability_domain_count") @availability_domain_count.setter def availability_domain_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "availability_domain_count", value) @property @pulumi.getter def collocation(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']]: """ Specifies network collocation """ return pulumi.get(self, "collocation") @collocation.setter def collocation(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation']]): pulumi.set(self, "collocation", value) @property @pulumi.getter def locality(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']]: """ Specifies network locality """ return pulumi.get(self, "locality") @locality.setter def locality(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality']]): pulumi.set(self, "locality", value) @property @pulumi.getter def scope(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']]: """ Scope specifies the availability domain to which the VMs should be spread. 
""" return pulumi.get(self, "scope") @scope.setter def scope(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyScope']]): pulumi.set(self, "scope", value) @property @pulumi.getter def style(self) -> Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']]: """ Specifies instances to hosts placement relationship """ return pulumi.get(self, "style") @style.setter def style(self, value: Optional[pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle']]): pulumi.set(self, "style", value) @property @pulumi.getter(name="vmCount") def vm_count(self) -> Optional[pulumi.Input[int]]: """ Number of vms in this placement group """ return pulumi.get(self, "vm_count") @vm_count.setter def vm_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vm_count", value) @pulumi.input_type class ResourcePolicyHourlyCycleArgs: def __init__(__self__, *, hours_in_cycle: Optional[pulumi.Input[int]] = None, start_time: Optional[pulumi.Input[str]] = None): """ Time window specified for hourly operations. :param pulumi.Input[int] hours_in_cycle: Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ if hours_in_cycle is not None: pulumi.set(__self__, "hours_in_cycle", hours_in_cycle) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter(name="hoursInCycle") def hours_in_cycle(self) -> Optional[pulumi.Input[int]]: """ Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. """ return pulumi.get(self, "hours_in_cycle") @hours_in_cycle.setter def hours_in_cycle(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "hours_in_cycle", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyInstanceSchedulePolicyScheduleArgs: def __init__(__self__, *, schedule: Optional[pulumi.Input[str]] = None): """ Schedule for an instance operation. :param pulumi.Input[str] schedule: Specifies the frequency for the operation, using the unix-cron format. """ if schedule is not None: pulumi.set(__self__, "schedule", schedule) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input[str]]: """ Specifies the frequency for the operation, using the unix-cron format. """ return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "schedule", value) @pulumi.input_type class ResourcePolicyInstanceSchedulePolicyArgs: def __init__(__self__, *, expiration_time: Optional[pulumi.Input[str]] = None, start_time: Optional[pulumi.Input[str]] = None, time_zone: Optional[pulumi.Input[str]] = None, vm_start_schedule: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']] = None, vm_stop_schedule: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']] = None): """ An InstanceSchedulePolicy specifies when and how frequent certain operations are performed on the instance. 
:param pulumi.Input[str] expiration_time: The expiration time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] start_time: The start time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] time_zone: Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_start_schedule: Specifies the schedule for starting instances. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_stop_schedule: Specifies the schedule for stopping instances. """ if expiration_time is not None: pulumi.set(__self__, "expiration_time", expiration_time) if start_time is not None: pulumi.set(__self__, "start_time", start_time) if time_zone is not None: pulumi.set(__self__, "time_zone", time_zone) if vm_start_schedule is not None: pulumi.set(__self__, "vm_start_schedule", vm_start_schedule) if vm_stop_schedule is not None: pulumi.set(__self__, "vm_stop_schedule", vm_stop_schedule) @property @pulumi.getter(name="expirationTime") def expiration_time(self) -> Optional[pulumi.Input[str]]: """ The expiration time of the schedule. The timestamp is an RFC3339 string. """ return pulumi.get(self, "expiration_time") @expiration_time.setter def expiration_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expiration_time", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ The start time of the schedule. The timestamp is an RFC3339 string. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @property @pulumi.getter(name="timeZone") def time_zone(self) -> Optional[pulumi.Input[str]]: """ Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. """ return pulumi.get(self, "time_zone") @time_zone.setter def time_zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "time_zone", value) @property @pulumi.getter(name="vmStartSchedule") def vm_start_schedule(self) -> Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]: """ Specifies the schedule for starting instances. """ return pulumi.get(self, "vm_start_schedule") @vm_start_schedule.setter def vm_start_schedule(self, value: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]): pulumi.set(self, "vm_start_schedule", value) @property @pulumi.getter(name="vmStopSchedule") def vm_stop_schedule(self) -> Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]: """ Specifies the schedule for stopping instances. 
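For example (an illustrative sketch; the cron expressions are placeholders), a policy that starts instances on weekday mornings and stops them in the evening:

    ResourcePolicyInstanceSchedulePolicyArgs(
        time_zone="UTC",
        vm_start_schedule=ResourcePolicyInstanceSchedulePolicyScheduleArgs(schedule="0 8 * * 1-5"),
        vm_stop_schedule=ResourcePolicyInstanceSchedulePolicyScheduleArgs(schedule="0 18 * * 1-5"),
    )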
""" return pulumi.get(self, "vm_stop_schedule") @vm_stop_schedule.setter def vm_stop_schedule(self, value: Optional[pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs']]): pulumi.set(self, "vm_stop_schedule", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs: def __init__(__self__, *, max_retention_days: Optional[pulumi.Input[int]] = None, on_policy_switch: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']] = None, on_source_disk_delete: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']] = None): """ Policy for retention of scheduled snapshots. :param pulumi.Input[int] max_retention_days: Maximum age of the snapshot that is allowed to be kept. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete'] on_source_disk_delete: Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. """ if max_retention_days is not None: pulumi.set(__self__, "max_retention_days", max_retention_days) if on_policy_switch is not None: pulumi.set(__self__, "on_policy_switch", on_policy_switch) if on_source_disk_delete is not None: pulumi.set(__self__, "on_source_disk_delete", on_source_disk_delete) @property @pulumi.getter(name="maxRetentionDays") def max_retention_days(self) -> Optional[pulumi.Input[int]]: """ Maximum age of the snapshot that is allowed to be kept. """ return pulumi.get(self, "max_retention_days") @max_retention_days.setter def max_retention_days(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_retention_days", value) @property @pulumi.getter(name="onPolicySwitch") def on_policy_switch(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']]: return pulumi.get(self, "on_policy_switch") @on_policy_switch.setter def on_policy_switch(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnPolicySwitch']]): pulumi.set(self, "on_policy_switch", value) @property @pulumi.getter(name="onSourceDiskDelete") def on_source_disk_delete(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']]: """ Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. """ return pulumi.get(self, "on_source_disk_delete") @on_source_disk_delete.setter def on_source_disk_delete(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete']]): pulumi.set(self, "on_source_disk_delete", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyScheduleArgs: def __init__(__self__, *, daily_schedule: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']] = None, hourly_schedule: Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']] = None, weekly_schedule: Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']] = None): """ A schedule for disks where the schedueled operations are performed. 
""" if daily_schedule is not None: pulumi.set(__self__, "daily_schedule", daily_schedule) if hourly_schedule is not None: pulumi.set(__self__, "hourly_schedule", hourly_schedule) if weekly_schedule is not None: pulumi.set(__self__, "weekly_schedule", weekly_schedule) @property @pulumi.getter(name="dailySchedule") def daily_schedule(self) -> Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]: return pulumi.get(self, "daily_schedule") @daily_schedule.setter def daily_schedule(self, value: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]): pulumi.set(self, "daily_schedule", value) @property @pulumi.getter(name="hourlySchedule") def hourly_schedule(self) -> Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']]: return pulumi.get(self, "hourly_schedule") @hourly_schedule.setter def hourly_schedule(self, value: Optional[pulumi.Input['ResourcePolicyHourlyCycleArgs']]): pulumi.set(self, "hourly_schedule", value) @property @pulumi.getter(name="weeklySchedule") def weekly_schedule(self) -> Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']]: return pulumi.get(self, "weekly_schedule") @weekly_schedule.setter def weekly_schedule(self, value: Optional[pulumi.Input['ResourcePolicyWeeklyCycleArgs']]): pulumi.set(self, "weekly_schedule", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs: def __init__(__self__, *, chain_name: Optional[pulumi.Input[str]] = None, guest_flush: Optional[pulumi.Input[bool]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, storage_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Specified snapshot properties for scheduled snapshots created by this policy. :param pulumi.Input[str] chain_name: Chain name that the snapshot is created in. :param pulumi.Input[bool] guest_flush: Indication to perform a 'guest aware' snapshot. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_locations: Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). """ if chain_name is not None: pulumi.set(__self__, "chain_name", chain_name) if guest_flush is not None: pulumi.set(__self__, "guest_flush", guest_flush) if labels is not None: pulumi.set(__self__, "labels", labels) if storage_locations is not None: pulumi.set(__self__, "storage_locations", storage_locations) @property @pulumi.getter(name="chainName") def chain_name(self) -> Optional[pulumi.Input[str]]: """ Chain name that the snapshot is created in. """ return pulumi.get(self, "chain_name") @chain_name.setter def chain_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "chain_name", value) @property @pulumi.getter(name="guestFlush") def guest_flush(self) -> Optional[pulumi.Input[bool]]: """ Indication to perform a 'guest aware' snapshot. """ return pulumi.get(self, "guest_flush") @guest_flush.setter def guest_flush(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "guest_flush", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. 
""" return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="storageLocations") def storage_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). """ return pulumi.get(self, "storage_locations") @storage_locations.setter def storage_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "storage_locations", value) @pulumi.input_type class ResourcePolicySnapshotSchedulePolicyArgs: def __init__(__self__, *, retention_policy: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']] = None, schedule: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']] = None, snapshot_properties: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']] = None): """ A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs'] retention_policy: Retention policy applied to snapshots created by this resource policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs'] schedule: A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs'] snapshot_properties: Properties with which snapshots are created such as labels, encryption keys. """ if retention_policy is not None: pulumi.set(__self__, "retention_policy", retention_policy) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if snapshot_properties is not None: pulumi.set(__self__, "snapshot_properties", snapshot_properties) @property @pulumi.getter(name="retentionPolicy") def retention_policy(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']]: """ Retention policy applied to snapshots created by this resource policy. """ return pulumi.get(self, "retention_policy") @retention_policy.setter def retention_policy(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs']]): pulumi.set(self, "retention_policy", value) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']]: """ A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy. """ return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs']]): pulumi.set(self, "schedule", value) @property @pulumi.getter(name="snapshotProperties") def snapshot_properties(self) -> Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']]: """ Properties with which snapshots are created such as labels, encryption keys. 
""" return pulumi.get(self, "snapshot_properties") @snapshot_properties.setter def snapshot_properties(self, value: Optional[pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs']]): pulumi.set(self, "snapshot_properties", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs: def __init__(__self__, *, concurrency_limit: Optional[pulumi.Input[int]] = None): """ A concurrency control configuration. Defines a group config that, when attached to an instance, recognizes that instance as part of a group of instances where only up the concurrency_limit of instances in that group can undergo simultaneous maintenance. For more information: go/concurrency-control-design-doc """ if concurrency_limit is not None: pulumi.set(__self__, "concurrency_limit", concurrency_limit) @property @pulumi.getter(name="concurrencyLimit") def concurrency_limit(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "concurrency_limit") @concurrency_limit.setter def concurrency_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "concurrency_limit", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs: def __init__(__self__, *, daily_maintenance_window: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']] = None): """ A maintenance window for VMs. When set, we restrict our maintenance operations to this window. """ if daily_maintenance_window is not None: pulumi.set(__self__, "daily_maintenance_window", daily_maintenance_window) @property @pulumi.getter(name="dailyMaintenanceWindow") def daily_maintenance_window(self) -> Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]: return pulumi.get(self, "daily_maintenance_window") @daily_maintenance_window.setter def daily_maintenance_window(self, value: Optional[pulumi.Input['ResourcePolicyDailyCycleArgs']]): pulumi.set(self, "daily_maintenance_window", value) @pulumi.input_type class ResourcePolicyVmMaintenancePolicyArgs: def __init__(__self__, *, concurrency_control_group: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']] = None, maintenance_window: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']] = None): """ :param pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs'] maintenance_window: Maintenance windows that are applied to VMs covered by this policy. """ if concurrency_control_group is not None: pulumi.set(__self__, "concurrency_control_group", concurrency_control_group) if maintenance_window is not None: pulumi.set(__self__, "maintenance_window", maintenance_window) @property @pulumi.getter(name="concurrencyControlGroup") def concurrency_control_group(self) -> Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']]: return pulumi.get(self, "concurrency_control_group") @concurrency_control_group.setter def concurrency_control_group(self, value: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyConcurrencyControlArgs']]): pulumi.set(self, "concurrency_control_group", value) @property @pulumi.getter(name="maintenanceWindow") def maintenance_window(self) -> Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']]: """ Maintenance windows that are applied to VMs covered by this policy. 
""" return pulumi.get(self, "maintenance_window") @maintenance_window.setter def maintenance_window(self, value: Optional[pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs']]): pulumi.set(self, "maintenance_window", value) @pulumi.input_type class ResourcePolicyWeeklyCycleDayOfWeekArgs: def __init__(__self__, *, day: Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']] = None, start_time: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay'] day: Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ if day is not None: pulumi.set(__self__, "day", day) if start_time is not None: pulumi.set(__self__, "start_time", start_time) @property @pulumi.getter def day(self) -> Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']]: """ Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. """ return pulumi.get(self, "day") @day.setter def day(self, value: Optional[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay']]): pulumi.set(self, "day", value) @property @pulumi.getter(name="startTime") def start_time(self) -> Optional[pulumi.Input[str]]: """ Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. """ return pulumi.get(self, "start_time") @start_time.setter def start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "start_time", value) @pulumi.input_type class ResourcePolicyWeeklyCycleArgs: def __init__(__self__, *, day_of_weeks: Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]] = None): """ Time window specified for weekly operations. :param pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]] day_of_weeks: Up to 7 intervals/windows, one for each day of the week. """ if day_of_weeks is not None: pulumi.set(__self__, "day_of_weeks", day_of_weeks) @property @pulumi.getter(name="dayOfWeeks") def day_of_weeks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]]: """ Up to 7 intervals/windows, one for each day of the week. """ return pulumi.get(self, "day_of_weeks") @day_of_weeks.setter def day_of_weeks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]]]): pulumi.set(self, "day_of_weeks", value) @pulumi.input_type class RolloutPolicyArgs: def __init__(__self__, *, default_rollout_time: Optional[pulumi.Input[str]] = None, location_rollout_policies: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ A rollout policy configuration. :param pulumi.Input[str] default_rollout_time: An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] location_rollout_policies: Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. 
The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. """ if default_rollout_time is not None: pulumi.set(__self__, "default_rollout_time", default_rollout_time) if location_rollout_policies is not None: pulumi.set(__self__, "location_rollout_policies", location_rollout_policies) @property @pulumi.getter(name="defaultRolloutTime") def default_rollout_time(self) -> Optional[pulumi.Input[str]]: """ An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. """ return pulumi.get(self, "default_rollout_time") @default_rollout_time.setter def default_rollout_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_rollout_time", value) @property @pulumi.getter(name="locationRolloutPolicies") def location_rollout_policies(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. """ return pulumi.get(self, "location_rollout_policies") @location_rollout_policies.setter def location_rollout_policies(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "location_rollout_policies", value) @pulumi.input_type class RouterAdvertisedIpRangeArgs: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, range: Optional[pulumi.Input[str]] = None): """ Description-tagged IP ranges for the router to advertise. :param pulumi.Input[str] description: User-specified description for the IP range. :param pulumi.Input[str] range: The IP range to advertise. The value must be a CIDR-formatted string. """ if description is not None: pulumi.set(__self__, "description", description) if range is not None: pulumi.set(__self__, "range", range) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ User-specified description for the IP range. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def range(self) -> Optional[pulumi.Input[str]]: """ The IP range to advertise. The value must be a CIDR-formatted string. """ return pulumi.get(self, "range") @range.setter def range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range", value) @pulumi.input_type class RouterBgpPeerBfdArgs: def __init__(__self__, *, min_receive_interval: Optional[pulumi.Input[int]] = None, min_transmit_interval: Optional[pulumi.Input[int]] = None, mode: Optional[pulumi.Input['RouterBgpPeerBfdMode']] = None, multiplier: Optional[pulumi.Input[int]] = None, packet_mode: Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']] = None, session_initialization_mode: Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']] = None, slow_timer_interval: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] min_receive_interval: The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. 
:param pulumi.Input[int] min_transmit_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. :param pulumi.Input['RouterBgpPeerBfdMode'] mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] multiplier: The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. :param pulumi.Input['RouterBgpPeerBfdPacketMode'] packet_mode: The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO. :param pulumi.Input['RouterBgpPeerBfdSessionInitializationMode'] session_initialization_mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] slow_timer_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. """ if min_receive_interval is not None: pulumi.set(__self__, "min_receive_interval", min_receive_interval) if min_transmit_interval is not None: pulumi.set(__self__, "min_transmit_interval", min_transmit_interval) if mode is not None: pulumi.set(__self__, "mode", mode) if multiplier is not None: pulumi.set(__self__, "multiplier", multiplier) if packet_mode is not None: pulumi.set(__self__, "packet_mode", packet_mode) if session_initialization_mode is not None: pulumi.set(__self__, "session_initialization_mode", session_initialization_mode) if slow_timer_interval is not None: pulumi.set(__self__, "slow_timer_interval", slow_timer_interval) @property @pulumi.getter(name="minReceiveInterval") def min_receive_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. 
The default is 1000. """ return pulumi.get(self, "min_receive_interval") @min_receive_interval.setter def min_receive_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_receive_interval", value) @property @pulumi.getter(name="minTransmitInterval") def min_transmit_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. """ return pulumi.get(self, "min_transmit_interval") @min_transmit_interval.setter def min_transmit_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_transmit_interval", value) @property @pulumi.getter def mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdMode']]: """ The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. """ return pulumi.get(self, "mode") @mode.setter def mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdMode']]): pulumi.set(self, "mode", value) @property @pulumi.getter def multiplier(self) -> Optional[pulumi.Input[int]]: """ The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. """ return pulumi.get(self, "multiplier") @multiplier.setter def multiplier(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "multiplier", value) @property @pulumi.getter(name="packetMode") def packet_mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']]: """ The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO. """ return pulumi.get(self, "packet_mode") @packet_mode.setter def packet_mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdPacketMode']]): pulumi.set(self, "packet_mode", value) @property @pulumi.getter(name="sessionInitializationMode") def session_initialization_mode(self) -> Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']]: """ The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. 
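        A minimal illustrative sketch of a BFD configuration that actively initiates the session; it assumes the companion RouterBgpPeerBfdSessionInitializationMode enum exposes the documented ACTIVE value, and the interval values are examples only:

            bfd = RouterBgpPeerBfdArgs(
                session_initialization_mode=RouterBgpPeerBfdSessionInitializationMode.ACTIVE,
                min_receive_interval=1000,
                min_transmit_interval=1000,
                multiplier=5,
            )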
""" return pulumi.get(self, "session_initialization_mode") @session_initialization_mode.setter def session_initialization_mode(self, value: Optional[pulumi.Input['RouterBgpPeerBfdSessionInitializationMode']]): pulumi.set(self, "session_initialization_mode", value) @property @pulumi.getter(name="slowTimerInterval") def slow_timer_interval(self) -> Optional[pulumi.Input[int]]: """ The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. """ return pulumi.get(self, "slow_timer_interval") @slow_timer_interval.setter def slow_timer_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "slow_timer_interval", value) @pulumi.input_type class RouterBgpPeerArgs: def __init__(__self__, *, advertise_mode: Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']] = None, advertised_groups: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]] = None, advertised_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]] = None, advertised_route_priority: Optional[pulumi.Input[int]] = None, bfd: Optional[pulumi.Input['RouterBgpPeerBfdArgs']] = None, enable: Optional[pulumi.Input['RouterBgpPeerEnable']] = None, enable_ipv6: Optional[pulumi.Input[bool]] = None, interface_name: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, ipv6_nexthop_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, peer_asn: Optional[pulumi.Input[int]] = None, peer_ip_address: Optional[pulumi.Input[str]] = None, peer_ipv6_nexthop_address: Optional[pulumi.Input[str]] = None, router_appliance_instance: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['RouterBgpPeerAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] advertised_route_priority: The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. :param pulumi.Input['RouterBgpPeerBfdArgs'] bfd: BFD configuration for the BGP peering. 
:param pulumi.Input['RouterBgpPeerEnable'] enable: The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. :param pulumi.Input[bool] enable_ipv6: Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. :param pulumi.Input[str] interface_name: Name of the interface the BGP peer is associated with. :param pulumi.Input[str] ip_address: IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] ipv6_nexthop_address: IPv6 address of the interface inside Google Cloud Platform. :param pulumi.Input[str] name: Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[int] peer_asn: Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. :param pulumi.Input[str] peer_ip_address: IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] peer_ipv6_nexthop_address: IPv6 address of the BGP interface outside Google Cloud Platform. :param pulumi.Input[str] router_appliance_instance: URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. """ if advertise_mode is not None: pulumi.set(__self__, "advertise_mode", advertise_mode) if advertised_groups is not None: pulumi.set(__self__, "advertised_groups", advertised_groups) if advertised_ip_ranges is not None: pulumi.set(__self__, "advertised_ip_ranges", advertised_ip_ranges) if advertised_route_priority is not None: pulumi.set(__self__, "advertised_route_priority", advertised_route_priority) if bfd is not None: pulumi.set(__self__, "bfd", bfd) if enable is not None: pulumi.set(__self__, "enable", enable) if enable_ipv6 is not None: pulumi.set(__self__, "enable_ipv6", enable_ipv6) if interface_name is not None: pulumi.set(__self__, "interface_name", interface_name) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if ipv6_nexthop_address is not None: pulumi.set(__self__, "ipv6_nexthop_address", ipv6_nexthop_address) if name is not None: pulumi.set(__self__, "name", name) if peer_asn is not None: pulumi.set(__self__, "peer_asn", peer_asn) if peer_ip_address is not None: pulumi.set(__self__, "peer_ip_address", peer_ip_address) if peer_ipv6_nexthop_address is not None: pulumi.set(__self__, "peer_ipv6_nexthop_address", peer_ipv6_nexthop_address) if router_appliance_instance is not None: pulumi.set(__self__, "router_appliance_instance", router_appliance_instance) @property @pulumi.getter(name="advertiseMode") def advertise_mode(self) -> Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']]: """ User-specified flag to indicate which mode to use for advertisement. 
""" return pulumi.get(self, "advertise_mode") @advertise_mode.setter def advertise_mode(self, value: Optional[pulumi.Input['RouterBgpPeerAdvertiseMode']]): pulumi.set(self, "advertise_mode", value) @property @pulumi.getter(name="advertisedGroups") def advertised_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]]: """ User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. """ return pulumi.get(self, "advertised_groups") @advertised_groups.setter def advertised_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]]]): pulumi.set(self, "advertised_groups", value) @property @pulumi.getter(name="advertisedIpRanges") def advertised_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]: """ User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. """ return pulumi.get(self, "advertised_ip_ranges") @advertised_ip_ranges.setter def advertised_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]): pulumi.set(self, "advertised_ip_ranges", value) @property @pulumi.getter(name="advertisedRoutePriority") def advertised_route_priority(self) -> Optional[pulumi.Input[int]]: """ The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. """ return pulumi.get(self, "advertised_route_priority") @advertised_route_priority.setter def advertised_route_priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "advertised_route_priority", value) @property @pulumi.getter def bfd(self) -> Optional[pulumi.Input['RouterBgpPeerBfdArgs']]: """ BFD configuration for the BGP peering. """ return pulumi.get(self, "bfd") @bfd.setter def bfd(self, value: Optional[pulumi.Input['RouterBgpPeerBfdArgs']]): pulumi.set(self, "bfd", value) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input['RouterBgpPeerEnable']]: """ The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input['RouterBgpPeerEnable']]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="enableIpv6") def enable_ipv6(self) -> Optional[pulumi.Input[bool]]: """ Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. 
""" return pulumi.get(self, "enable_ipv6") @enable_ipv6.setter def enable_ipv6(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_ipv6", value) @property @pulumi.getter(name="interfaceName") def interface_name(self) -> Optional[pulumi.Input[str]]: """ Name of the interface the BGP peer is associated with. """ return pulumi.get(self, "interface_name") @interface_name.setter def interface_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interface_name", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @property @pulumi.getter(name="ipv6NexthopAddress") def ipv6_nexthop_address(self) -> Optional[pulumi.Input[str]]: """ IPv6 address of the interface inside Google Cloud Platform. """ return pulumi.get(self, "ipv6_nexthop_address") @ipv6_nexthop_address.setter def ipv6_nexthop_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ipv6_nexthop_address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="peerAsn") def peer_asn(self) -> Optional[pulumi.Input[int]]: """ Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. """ return pulumi.get(self, "peer_asn") @peer_asn.setter def peer_asn(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "peer_asn", value) @property @pulumi.getter(name="peerIpAddress") def peer_ip_address(self) -> Optional[pulumi.Input[str]]: """ IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. """ return pulumi.get(self, "peer_ip_address") @peer_ip_address.setter def peer_ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_ip_address", value) @property @pulumi.getter(name="peerIpv6NexthopAddress") def peer_ipv6_nexthop_address(self) -> Optional[pulumi.Input[str]]: """ IPv6 address of the BGP interface outside Google Cloud Platform. """ return pulumi.get(self, "peer_ipv6_nexthop_address") @peer_ipv6_nexthop_address.setter def peer_ipv6_nexthop_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_ipv6_nexthop_address", value) @property @pulumi.getter(name="routerApplianceInstance") def router_appliance_instance(self) -> Optional[pulumi.Input[str]]: """ URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. 
""" return pulumi.get(self, "router_appliance_instance") @router_appliance_instance.setter def router_appliance_instance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "router_appliance_instance", value) @pulumi.input_type class RouterBgpArgs: def __init__(__self__, *, advertise_mode: Optional[pulumi.Input['RouterBgpAdvertiseMode']] = None, advertised_groups: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]] = None, advertised_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]] = None, asn: Optional[pulumi.Input[int]] = None, keepalive_interval: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input['RouterBgpAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. :param pulumi.Input[int] keepalive_interval: The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. """ if advertise_mode is not None: pulumi.set(__self__, "advertise_mode", advertise_mode) if advertised_groups is not None: pulumi.set(__self__, "advertised_groups", advertised_groups) if advertised_ip_ranges is not None: pulumi.set(__self__, "advertised_ip_ranges", advertised_ip_ranges) if asn is not None: pulumi.set(__self__, "asn", asn) if keepalive_interval is not None: pulumi.set(__self__, "keepalive_interval", keepalive_interval) @property @pulumi.getter(name="advertiseMode") def advertise_mode(self) -> Optional[pulumi.Input['RouterBgpAdvertiseMode']]: """ User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. """ return pulumi.get(self, "advertise_mode") @advertise_mode.setter def advertise_mode(self, value: Optional[pulumi.Input['RouterBgpAdvertiseMode']]): pulumi.set(self, "advertise_mode", value) @property @pulumi.getter(name="advertisedGroups") def advertised_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]]: """ User-specified list of prefix groups to advertise in custom mode. 
This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. """ return pulumi.get(self, "advertised_groups") @advertised_groups.setter def advertised_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]]]): pulumi.set(self, "advertised_groups", value) @property @pulumi.getter(name="advertisedIpRanges") def advertised_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]: """ User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. """ return pulumi.get(self, "advertised_ip_ranges") @advertised_ip_ranges.setter def advertised_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]]]): pulumi.set(self, "advertised_ip_ranges", value) @property @pulumi.getter def asn(self) -> Optional[pulumi.Input[int]]: """ Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. """ return pulumi.get(self, "asn") @asn.setter def asn(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "asn", value) @property @pulumi.getter(name="keepaliveInterval") def keepalive_interval(self) -> Optional[pulumi.Input[int]]: """ The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. """ return pulumi.get(self, "keepalive_interval") @keepalive_interval.setter def keepalive_interval(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "keepalive_interval", value) @pulumi.input_type class RouterInterfaceArgs: def __init__(__self__, *, ip_range: Optional[pulumi.Input[str]] = None, linked_interconnect_attachment: Optional[pulumi.Input[str]] = None, linked_vpn_tunnel: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, private_ip_address: Optional[pulumi.Input[str]] = None, redundant_interface: Optional[pulumi.Input[str]] = None, subnetwork: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] ip_range: IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. :param pulumi.Input[str] linked_interconnect_attachment: URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. 
:param pulumi.Input[str] linked_vpn_tunnel: URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. :param pulumi.Input[str] name: Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] private_ip_address: The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. :param pulumi.Input[str] redundant_interface: Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers. The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] subnetwork: The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. """ if ip_range is not None: pulumi.set(__self__, "ip_range", ip_range) if linked_interconnect_attachment is not None: pulumi.set(__self__, "linked_interconnect_attachment", linked_interconnect_attachment) if linked_vpn_tunnel is not None: pulumi.set(__self__, "linked_vpn_tunnel", linked_vpn_tunnel) if name is not None: pulumi.set(__self__, "name", name) if private_ip_address is not None: pulumi.set(__self__, "private_ip_address", private_ip_address) if redundant_interface is not None: pulumi.set(__self__, "redundant_interface", redundant_interface) if subnetwork is not None: pulumi.set(__self__, "subnetwork", subnetwork) @property @pulumi.getter(name="ipRange") def ip_range(self) -> Optional[pulumi.Input[str]]: """ IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. """ return pulumi.get(self, "ip_range") @ip_range.setter def ip_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_range", value) @property @pulumi.getter(name="linkedInterconnectAttachment") def linked_interconnect_attachment(self) -> Optional[pulumi.Input[str]]: """ URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. 
""" return pulumi.get(self, "linked_interconnect_attachment") @linked_interconnect_attachment.setter def linked_interconnect_attachment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "linked_interconnect_attachment", value) @property @pulumi.getter(name="linkedVpnTunnel") def linked_vpn_tunnel(self) -> Optional[pulumi.Input[str]]: """ URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. """ return pulumi.get(self, "linked_vpn_tunnel") @linked_vpn_tunnel.setter def linked_vpn_tunnel(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "linked_vpn_tunnel", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="privateIpAddress") def private_ip_address(self) -> Optional[pulumi.Input[str]]: """ The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. """ return pulumi.get(self, "private_ip_address") @private_ip_address.setter def private_ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_ip_address", value) @property @pulumi.getter(name="redundantInterface") def redundant_interface(self) -> Optional[pulumi.Input[str]]: """ Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers. The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "redundant_interface") @redundant_interface.setter def redundant_interface(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redundant_interface", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. 
""" return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @pulumi.input_type class RouterNatLogConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, filter: Optional[pulumi.Input['RouterNatLogConfigFilter']] = None): """ Configuration of logging on a NAT. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default. :param pulumi.Input['RouterNatLogConfigFilter'] filter: Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. """ if enable is not None: pulumi.set(__self__, "enable", enable) if filter is not None: pulumi.set(__self__, "filter", filter) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Indicates whether or not to export logs. This is false by default. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter def filter(self) -> Optional[pulumi.Input['RouterNatLogConfigFilter']]: """ Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. """ return pulumi.get(self, "filter") @filter.setter def filter(self, value: Optional[pulumi.Input['RouterNatLogConfigFilter']]): pulumi.set(self, "filter", value) @pulumi.input_type class RouterNatRuleActionArgs: def __init__(__self__, *, source_nat_active_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_active_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_drain_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_nat_drain_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ips: A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ranges: A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ranges: A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. 
""" if source_nat_active_ips is not None: pulumi.set(__self__, "source_nat_active_ips", source_nat_active_ips) if source_nat_active_ranges is not None: pulumi.set(__self__, "source_nat_active_ranges", source_nat_active_ranges) if source_nat_drain_ips is not None: pulumi.set(__self__, "source_nat_drain_ips", source_nat_drain_ips) if source_nat_drain_ranges is not None: pulumi.set(__self__, "source_nat_drain_ranges", source_nat_drain_ranges) @property @pulumi.getter(name="sourceNatActiveIps") def source_nat_active_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. """ return pulumi.get(self, "source_nat_active_ips") @source_nat_active_ips.setter def source_nat_active_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_active_ips", value) @property @pulumi.getter(name="sourceNatActiveRanges") def source_nat_active_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT. """ return pulumi.get(self, "source_nat_active_ranges") @source_nat_active_ranges.setter def source_nat_active_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_active_ranges", value) @property @pulumi.getter(name="sourceNatDrainIps") def source_nat_drain_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. """ return pulumi.get(self, "source_nat_drain_ips") @source_nat_drain_ips.setter def source_nat_drain_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_drain_ips", value) @property @pulumi.getter(name="sourceNatDrainRanges") def source_nat_drain_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. """ return pulumi.get(self, "source_nat_drain_ranges") @source_nat_drain_ranges.setter def source_nat_drain_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "source_nat_drain_ranges", value) @pulumi.input_type class RouterNatRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input['RouterNatRuleActionArgs']] = None, description: Optional[pulumi.Input[str]] = None, match: Optional[pulumi.Input[str]] = None, rule_number: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input['RouterNatRuleActionArgs'] action: The action to be enforced for traffic that matches this rule. :param pulumi.Input[str] description: An optional description of this rule. :param pulumi.Input[str] match: CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. 
The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" :param pulumi.Input[int] rule_number: An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. """ if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if match is not None: pulumi.set(__self__, "match", match) if rule_number is not None: pulumi.set(__self__, "rule_number", rule_number) @property @pulumi.getter def action(self) -> Optional[pulumi.Input['RouterNatRuleActionArgs']]: """ The action to be enforced for traffic that matches this rule. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input['RouterNatRuleActionArgs']]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this rule. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def match(self) -> Optional[pulumi.Input[str]]: """ CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" """ return pulumi.get(self, "match") @match.setter def match(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "match", value) @property @pulumi.getter(name="ruleNumber") def rule_number(self) -> Optional[pulumi.Input[int]]: """ An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. """ return pulumi.get(self, "rule_number") @rule_number.setter def rule_number(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rule_number", value) @pulumi.input_type class RouterNatSubnetworkToNatArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, secondary_ip_range_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, source_ip_ranges_to_nat: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]] = None): """ Defines the IP ranges that want to use NAT for a subnetwork. :param pulumi.Input[str] name: URL for the subnetwork resource that will use NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] secondary_ip_range_names: A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat. 
:param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]] source_ip_ranges_to_nat: Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] """ if name is not None: pulumi.set(__self__, "name", name) if secondary_ip_range_names is not None: pulumi.set(__self__, "secondary_ip_range_names", secondary_ip_range_names) if source_ip_ranges_to_nat is not None: pulumi.set(__self__, "source_ip_ranges_to_nat", source_ip_ranges_to_nat) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ URL for the subnetwork resource that will use NAT. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="secondaryIpRangeNames") def secondary_ip_range_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat. """ return pulumi.get(self, "secondary_ip_range_names") @secondary_ip_range_names.setter def secondary_ip_range_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "secondary_ip_range_names", value) @property @pulumi.getter(name="sourceIpRangesToNat") def source_ip_ranges_to_nat(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]]: """ Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. 
The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] """ return pulumi.get(self, "source_ip_ranges_to_nat") @source_ip_ranges_to_nat.setter def source_ip_ranges_to_nat(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]]]): pulumi.set(self, "source_ip_ranges_to_nat", value) @pulumi.input_type class RouterNatArgs: def __init__(__self__, *, drain_nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, enable_dynamic_port_allocation: Optional[pulumi.Input[bool]] = None, enable_endpoint_independent_mapping: Optional[pulumi.Input[bool]] = None, icmp_idle_timeout_sec: Optional[pulumi.Input[int]] = None, log_config: Optional[pulumi.Input['RouterNatLogConfigArgs']] = None, max_ports_per_vm: Optional[pulumi.Input[int]] = None, min_ports_per_vm: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, nat_ip_allocate_option: Optional[pulumi.Input['RouterNatNatIpAllocateOption']] = None, nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]] = None, source_subnetwork_ip_ranges_to_nat: Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']] = None, subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]] = None, tcp_established_idle_timeout_sec: Optional[pulumi.Input[int]] = None, tcp_time_wait_timeout_sec: Optional[pulumi.Input[int]] = None, tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[int]] = None, type: Optional[pulumi.Input['RouterNatType']] = None, udp_idle_timeout_sec: Optional[pulumi.Input[int]] = None): """ Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided. :param pulumi.Input[Sequence[pulumi.Input[str]]] drain_nat_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. :param pulumi.Input[bool] enable_dynamic_port_allocation: Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. :param pulumi.Input[int] icmp_idle_timeout_sec: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatLogConfigArgs'] log_config: Configure logging on this NAT. :param pulumi.Input[int] max_ports_per_vm: Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. 
:param pulumi.Input[int] min_ports_per_vm: Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. :param pulumi.Input[str] name: Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. :param pulumi.Input['RouterNatNatIpAllocateOption'] nat_ip_allocate_option: Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] nat_ips: A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. :param pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]] rules: A list of rules associated with this NAT. :param pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat'] source_subnetwork_ip_ranges_to_nat: Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. :param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]] subnetworks: A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. :param pulumi.Input[int] tcp_established_idle_timeout_sec: Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. :param pulumi.Input[int] tcp_time_wait_timeout_sec: Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set. :param pulumi.Input[int] tcp_transitory_idle_timeout_sec: Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatType'] type: Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC. :param pulumi.Input[int] udp_idle_timeout_sec: Timeout (in seconds) for UDP connections. Defaults to 30s if not set. 
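        A minimal illustrative sketch of a NAT that auto-allocates IPs and logs only errors; "my-nat" is a placeholder name, and it assumes the companion enum classes referenced above expose the documented values:

            nat = RouterNatArgs(
                name="my-nat",
                nat_ip_allocate_option=RouterNatNatIpAllocateOption.AUTO_ONLY,
                source_subnetwork_ip_ranges_to_nat=RouterNatSourceSubnetworkIpRangesToNat.ALL_SUBNETWORKS_ALL_IP_RANGES,
                log_config=RouterNatLogConfigArgs(enable=True, filter=RouterNatLogConfigFilter.ERRORS_ONLY),
            )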
""" if drain_nat_ips is not None: pulumi.set(__self__, "drain_nat_ips", drain_nat_ips) if enable_dynamic_port_allocation is not None: pulumi.set(__self__, "enable_dynamic_port_allocation", enable_dynamic_port_allocation) if enable_endpoint_independent_mapping is not None: pulumi.set(__self__, "enable_endpoint_independent_mapping", enable_endpoint_independent_mapping) if icmp_idle_timeout_sec is not None: pulumi.set(__self__, "icmp_idle_timeout_sec", icmp_idle_timeout_sec) if log_config is not None: pulumi.set(__self__, "log_config", log_config) if max_ports_per_vm is not None: pulumi.set(__self__, "max_ports_per_vm", max_ports_per_vm) if min_ports_per_vm is not None: pulumi.set(__self__, "min_ports_per_vm", min_ports_per_vm) if name is not None: pulumi.set(__self__, "name", name) if nat_ip_allocate_option is not None: pulumi.set(__self__, "nat_ip_allocate_option", nat_ip_allocate_option) if nat_ips is not None: pulumi.set(__self__, "nat_ips", nat_ips) if rules is not None: pulumi.set(__self__, "rules", rules) if source_subnetwork_ip_ranges_to_nat is not None: pulumi.set(__self__, "source_subnetwork_ip_ranges_to_nat", source_subnetwork_ip_ranges_to_nat) if subnetworks is not None: pulumi.set(__self__, "subnetworks", subnetworks) if tcp_established_idle_timeout_sec is not None: pulumi.set(__self__, "tcp_established_idle_timeout_sec", tcp_established_idle_timeout_sec) if tcp_time_wait_timeout_sec is not None: pulumi.set(__self__, "tcp_time_wait_timeout_sec", tcp_time_wait_timeout_sec) if tcp_transitory_idle_timeout_sec is not None: pulumi.set(__self__, "tcp_transitory_idle_timeout_sec", tcp_transitory_idle_timeout_sec) if type is not None: pulumi.set(__self__, "type", type) if udp_idle_timeout_sec is not None: pulumi.set(__self__, "udp_idle_timeout_sec", udp_idle_timeout_sec) @property @pulumi.getter(name="drainNatIps") def drain_nat_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. """ return pulumi.get(self, "drain_nat_ips") @drain_nat_ips.setter def drain_nat_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "drain_nat_ips", value) @property @pulumi.getter(name="enableDynamicPortAllocation") def enable_dynamic_port_allocation(self) -> Optional[pulumi.Input[bool]]: """ Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. 
""" return pulumi.get(self, "enable_dynamic_port_allocation") @enable_dynamic_port_allocation.setter def enable_dynamic_port_allocation(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_dynamic_port_allocation", value) @property @pulumi.getter(name="enableEndpointIndependentMapping") def enable_endpoint_independent_mapping(self) -> Optional[pulumi.Input[bool]]: return pulumi.get(self, "enable_endpoint_independent_mapping") @enable_endpoint_independent_mapping.setter def enable_endpoint_independent_mapping(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_endpoint_independent_mapping", value) @property @pulumi.getter(name="icmpIdleTimeoutSec") def icmp_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. """ return pulumi.get(self, "icmp_idle_timeout_sec") @icmp_idle_timeout_sec.setter def icmp_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "icmp_idle_timeout_sec", value) @property @pulumi.getter(name="logConfig") def log_config(self) -> Optional[pulumi.Input['RouterNatLogConfigArgs']]: """ Configure logging on this NAT. """ return pulumi.get(self, "log_config") @log_config.setter def log_config(self, value: Optional[pulumi.Input['RouterNatLogConfigArgs']]): pulumi.set(self, "log_config", value) @property @pulumi.getter(name="maxPortsPerVm") def max_ports_per_vm(self) -> Optional[pulumi.Input[int]]: """ Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. """ return pulumi.get(self, "max_ports_per_vm") @max_ports_per_vm.setter def max_ports_per_vm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_ports_per_vm", value) @property @pulumi.getter(name="minPortsPerVm") def min_ports_per_vm(self) -> Optional[pulumi.Input[int]]: """ Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. """ return pulumi.get(self, "min_ports_per_vm") @min_ports_per_vm.setter def min_ports_per_vm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_ports_per_vm", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="natIpAllocateOption") def nat_ip_allocate_option(self) -> Optional[pulumi.Input['RouterNatNatIpAllocateOption']]: """ Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. 
""" return pulumi.get(self, "nat_ip_allocate_option") @nat_ip_allocate_option.setter def nat_ip_allocate_option(self, value: Optional[pulumi.Input['RouterNatNatIpAllocateOption']]): pulumi.set(self, "nat_ip_allocate_option", value) @property @pulumi.getter(name="natIps") def nat_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. """ return pulumi.get(self, "nat_ips") @nat_ips.setter def nat_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "nat_ips", value) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]]: """ A list of rules associated with this NAT. """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]]): pulumi.set(self, "rules", value) @property @pulumi.getter(name="sourceSubnetworkIpRangesToNat") def source_subnetwork_ip_ranges_to_nat(self) -> Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']]: """ Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. """ return pulumi.get(self, "source_subnetwork_ip_ranges_to_nat") @source_subnetwork_ip_ranges_to_nat.setter def source_subnetwork_ip_ranges_to_nat(self, value: Optional[pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat']]): pulumi.set(self, "source_subnetwork_ip_ranges_to_nat", value) @property @pulumi.getter def subnetworks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]]: """ A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. """ return pulumi.get(self, "subnetworks") @subnetworks.setter def subnetworks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]]]): pulumi.set(self, "subnetworks", value) @property @pulumi.getter(name="tcpEstablishedIdleTimeoutSec") def tcp_established_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. """ return pulumi.get(self, "tcp_established_idle_timeout_sec") @tcp_established_idle_timeout_sec.setter def tcp_established_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_established_idle_timeout_sec", value) @property @pulumi.getter(name="tcpTimeWaitTimeoutSec") def tcp_time_wait_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set. 
""" return pulumi.get(self, "tcp_time_wait_timeout_sec") @tcp_time_wait_timeout_sec.setter def tcp_time_wait_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_time_wait_timeout_sec", value) @property @pulumi.getter(name="tcpTransitoryIdleTimeoutSec") def tcp_transitory_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. """ return pulumi.get(self, "tcp_transitory_idle_timeout_sec") @tcp_transitory_idle_timeout_sec.setter def tcp_transitory_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "tcp_transitory_idle_timeout_sec", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['RouterNatType']]: """ Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['RouterNatType']]): pulumi.set(self, "type", value) @property @pulumi.getter(name="udpIdleTimeoutSec") def udp_idle_timeout_sec(self) -> Optional[pulumi.Input[int]]: """ Timeout (in seconds) for UDP connections. Defaults to 30s if not set. """ return pulumi.get(self, "udp_idle_timeout_sec") @udp_idle_timeout_sec.setter def udp_idle_timeout_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "udp_idle_timeout_sec", value) @pulumi.input_type class RuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input['RuleAction']] = None, conditions: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]] = None, description: Optional[pulumi.Input[str]] = None, ins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, log_configs: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]] = None, not_ins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ This is deprecated and has no effect. Do not use. :param pulumi.Input['RuleAction'] action: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]] conditions: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] description: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]] log_configs: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] not_ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: This is deprecated and has no effect. Do not use. """ if action is not None: pulumi.set(__self__, "action", action) if conditions is not None: pulumi.set(__self__, "conditions", conditions) if description is not None: pulumi.set(__self__, "description", description) if ins is not None: pulumi.set(__self__, "ins", ins) if log_configs is not None: pulumi.set(__self__, "log_configs", log_configs) if not_ins is not None: pulumi.set(__self__, "not_ins", not_ins) if permissions is not None: pulumi.set(__self__, "permissions", permissions) @property @pulumi.getter def action(self) -> Optional[pulumi.Input['RuleAction']]: """ This is deprecated and has no effect. Do not use. 
""" return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input['RuleAction']]): pulumi.set(self, "action", value) @property @pulumi.getter def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "conditions") @conditions.setter def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]]]): pulumi.set(self, "conditions", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def ins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "ins") @ins.setter def ins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ins", value) @property @pulumi.getter(name="logConfigs") def log_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "log_configs") @log_configs.setter def log_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]]]): pulumi.set(self, "log_configs", value) @property @pulumi.getter(name="notIns") def not_ins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "not_ins") @not_ins.setter def not_ins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "not_ins", value) @property @pulumi.getter def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ This is deprecated and has no effect. Do not use. """ return pulumi.get(self, "permissions") @permissions.setter def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "permissions", value) @pulumi.input_type class SSLHealthCheckArgs: def __init__(__self__, *, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['SSLHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['SSLHealthCheckProxyHeader']] = None, request: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['SSLHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields. 
        :param pulumi.Input['SSLHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        :param pulumi.Input[str] request: The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.
        :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.
        """
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_name is not None:
            pulumi.set(__self__, "port_name", port_name)
        if port_specification is not None:
            pulumi.set(__self__, "port_specification", port_specification)
        if proxy_header is not None:
            pulumi.set(__self__, "proxy_header", proxy_header)
        if request is not None:
            pulumi.set(__self__, "request", request)
        if response is not None:
            pulumi.set(__self__, "response", response)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portName")
    def port_name(self) -> Optional[pulumi.Input[str]]:
        """
        Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
        """
        return pulumi.get(self, "port_name")

    @port_name.setter
    def port_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port_name", value)

    @property
    @pulumi.getter(name="portSpecification")
    def port_specification(self) -> Optional[pulumi.Input['SSLHealthCheckPortSpecification']]:
        """
        Specifies how the port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields.
        """
        return pulumi.get(self, "port_specification")

    @port_specification.setter
    def port_specification(self, value: Optional[pulumi.Input['SSLHealthCheckPortSpecification']]):
        pulumi.set(self, "port_specification", value)

    @property
    @pulumi.getter(name="proxyHeader")
    def proxy_header(self) -> Optional[pulumi.Input['SSLHealthCheckProxyHeader']]:
        """
        Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
        """
        return pulumi.get(self, "proxy_header")

    @proxy_header.setter
    def proxy_header(self, value: Optional[pulumi.Input['SSLHealthCheckProxyHeader']]):
        pulumi.set(self, "proxy_header", value)

    @property
    @pulumi.getter
    def request(self) -> Optional[pulumi.Input[str]]:
        """
        The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.
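
        Example (illustrative; the "PING"/"PONG" payloads are assumptions,
        and the enum member name follows the values listed above):

            check = SSLHealthCheckArgs(
                port=443,
                port_specification=SSLHealthCheckPortSpecification.USE_FIXED_PORT,
                request="PING",   # sent once the SSL connection is established
                response="PONG",  # expected prefix of the response data
            )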
""" return pulumi.get(self, "request") @request.setter def request(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class SavedDiskArgs: def __init__(__self__, *, source_disk: Optional[pulumi.Input[str]] = None): """ An instance-attached disk resource. :param pulumi.Input[str] source_disk: Specifies a URL of the disk attached to the source instance. """ if source_disk is not None: pulumi.set(__self__, "source_disk", source_disk) @property @pulumi.getter(name="sourceDisk") def source_disk(self) -> Optional[pulumi.Input[str]]: """ Specifies a URL of the disk attached to the source instance. """ return pulumi.get(self, "source_disk") @source_disk.setter def source_disk(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_disk", value) @pulumi.input_type class SchedulingNodeAffinityArgs: def __init__(__self__, *, key: Optional[pulumi.Input[str]] = None, operator: Optional[pulumi.Input['SchedulingNodeAffinityOperator']] = None, values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled. :param pulumi.Input[str] key: Corresponds to the label key of Node resource. :param pulumi.Input['SchedulingNodeAffinityOperator'] operator: Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of Node resource. """ if key is not None: pulumi.set(__self__, "key", key) if operator is not None: pulumi.set(__self__, "operator", operator) if values is not None: pulumi.set(__self__, "values", values) @property @pulumi.getter def key(self) -> Optional[pulumi.Input[str]]: """ Corresponds to the label key of Node resource. """ return pulumi.get(self, "key") @key.setter def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter def operator(self) -> Optional[pulumi.Input['SchedulingNodeAffinityOperator']]: """ Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. """ return pulumi.get(self, "operator") @operator.setter def operator(self, value: Optional[pulumi.Input['SchedulingNodeAffinityOperator']]): pulumi.set(self, "operator", value) @property @pulumi.getter def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Corresponds to the label values of Node resource. 
""" return pulumi.get(self, "values") @values.setter def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "values", value) @pulumi.input_type class SchedulingArgs: def __init__(__self__, *, automatic_restart: Optional[pulumi.Input[bool]] = None, availability_domain: Optional[pulumi.Input[int]] = None, current_cpus: Optional[pulumi.Input[int]] = None, current_memory_mb: Optional[pulumi.Input[str]] = None, host_error_timeout_seconds: Optional[pulumi.Input[int]] = None, instance_termination_action: Optional[pulumi.Input['SchedulingInstanceTerminationAction']] = None, latency_tolerant: Optional[pulumi.Input[bool]] = None, location_hint: Optional[pulumi.Input[str]] = None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]] = None, maintenance_interval: Optional[pulumi.Input['SchedulingMaintenanceInterval']] = None, max_run_duration: Optional[pulumi.Input['DurationArgs']] = None, min_node_cpus: Optional[pulumi.Input[int]] = None, node_affinities: Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]] = None, on_host_maintenance: Optional[pulumi.Input['SchedulingOnHostMaintenance']] = None, preemptible: Optional[pulumi.Input[bool]] = None, provisioning_model: Optional[pulumi.Input['SchedulingProvisioningModel']] = None, termination_time: Optional[pulumi.Input[str]] = None): """ Sets the scheduling options for an Instance. NextID: 21 :param pulumi.Input[bool] automatic_restart: Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. :param pulumi.Input[int] availability_domain: Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. :param pulumi.Input[int] current_cpus: Current number of vCPUs available for VM. 0 or unset means default vCPUs of the current machine type. :param pulumi.Input[str] current_memory_mb: Current amount of memory (in MB) available for VM. 0 or unset means default amount of memory of the current machine type. :param pulumi.Input[int] host_error_timeout_seconds: Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. :param pulumi.Input['SchedulingInstanceTerminationAction'] instance_termination_action: Specifies the termination action for the instance. :param pulumi.Input[bool] latency_tolerant: Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set. :param pulumi.Input[str] location_hint: An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. 
:param pulumi.Input['SchedulingMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals. :param pulumi.Input['DurationArgs'] max_run_duration: Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration. :param pulumi.Input[int] min_node_cpus: The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. :param pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]] node_affinities: A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. :param pulumi.Input['SchedulingOnHostMaintenance'] on_host_maintenance: Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options. :param pulumi.Input[bool] preemptible: Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. :param pulumi.Input['SchedulingProvisioningModel'] provisioning_model: Specifies the provisioning model of the instance. :param pulumi.Input[str] termination_time: Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. """ if automatic_restart is not None: pulumi.set(__self__, "automatic_restart", automatic_restart) if availability_domain is not None: pulumi.set(__self__, "availability_domain", availability_domain) if current_cpus is not None: pulumi.set(__self__, "current_cpus", current_cpus) if current_memory_mb is not None: pulumi.set(__self__, "current_memory_mb", current_memory_mb) if host_error_timeout_seconds is not None: pulumi.set(__self__, "host_error_timeout_seconds", host_error_timeout_seconds) if instance_termination_action is not None: pulumi.set(__self__, "instance_termination_action", instance_termination_action) if latency_tolerant is not None: pulumi.set(__self__, "latency_tolerant", latency_tolerant) if location_hint is not None: pulumi.set(__self__, "location_hint", location_hint) if maintenance_freeze_duration_hours is not None: pulumi.set(__self__, "maintenance_freeze_duration_hours", maintenance_freeze_duration_hours) if maintenance_interval is not None: pulumi.set(__self__, "maintenance_interval", maintenance_interval) if max_run_duration is not None: pulumi.set(__self__, "max_run_duration", max_run_duration) if min_node_cpus is not None: pulumi.set(__self__, "min_node_cpus", min_node_cpus) if node_affinities is not None: pulumi.set(__self__, "node_affinities", node_affinities) if on_host_maintenance is not None: pulumi.set(__self__, "on_host_maintenance", on_host_maintenance) if preemptible is not None: pulumi.set(__self__, "preemptible", preemptible) if provisioning_model is not None: pulumi.set(__self__, "provisioning_model", provisioning_model) if termination_time is not None: pulumi.set(__self__, "termination_time", termination_time) @property @pulumi.getter(name="automaticRestart") def automatic_restart(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the instance should be automatically restarted if it is 
terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. """ return pulumi.get(self, "automatic_restart") @automatic_restart.setter def automatic_restart(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "automatic_restart", value) @property @pulumi.getter(name="availabilityDomain") def availability_domain(self) -> Optional[pulumi.Input[int]]: """ Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. """ return pulumi.get(self, "availability_domain") @availability_domain.setter def availability_domain(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "availability_domain", value) @property @pulumi.getter(name="currentCpus") def current_cpus(self) -> Optional[pulumi.Input[int]]: """ Current number of vCPUs available for VM. 0 or unset means default vCPUs of the current machine type. """ return pulumi.get(self, "current_cpus") @current_cpus.setter def current_cpus(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "current_cpus", value) @property @pulumi.getter(name="currentMemoryMb") def current_memory_mb(self) -> Optional[pulumi.Input[str]]: """ Current amount of memory (in MB) available for VM. 0 or unset means default amount of memory of the current machine type. """ return pulumi.get(self, "current_memory_mb") @current_memory_mb.setter def current_memory_mb(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "current_memory_mb", value) @property @pulumi.getter(name="hostErrorTimeoutSeconds") def host_error_timeout_seconds(self) -> Optional[pulumi.Input[int]]: """ Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. """ return pulumi.get(self, "host_error_timeout_seconds") @host_error_timeout_seconds.setter def host_error_timeout_seconds(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "host_error_timeout_seconds", value) @property @pulumi.getter(name="instanceTerminationAction") def instance_termination_action(self) -> Optional[pulumi.Input['SchedulingInstanceTerminationAction']]: """ Specifies the termination action for the instance. """ return pulumi.get(self, "instance_termination_action") @instance_termination_action.setter def instance_termination_action(self, value: Optional[pulumi.Input['SchedulingInstanceTerminationAction']]): pulumi.set(self, "instance_termination_action", value) @property @pulumi.getter(name="latencyTolerant") def latency_tolerant(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set. 
""" return pulumi.get(self, "latency_tolerant") @latency_tolerant.setter def latency_tolerant(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "latency_tolerant", value) @property @pulumi.getter(name="locationHint") def location_hint(self) -> Optional[pulumi.Input[str]]: """ An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. """ return pulumi.get(self, "location_hint") @location_hint.setter def location_hint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location_hint", value) @property @pulumi.getter(name="maintenanceFreezeDurationHours") def maintenance_freeze_duration_hours(self) -> Optional[pulumi.Input[int]]: """ Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. """ return pulumi.get(self, "maintenance_freeze_duration_hours") @maintenance_freeze_duration_hours.setter def maintenance_freeze_duration_hours(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "maintenance_freeze_duration_hours", value) @property @pulumi.getter(name="maintenanceInterval") def maintenance_interval(self) -> Optional[pulumi.Input['SchedulingMaintenanceInterval']]: """ For more information about maintenance intervals, see Setting maintenance intervals. """ return pulumi.get(self, "maintenance_interval") @maintenance_interval.setter def maintenance_interval(self, value: Optional[pulumi.Input['SchedulingMaintenanceInterval']]): pulumi.set(self, "maintenance_interval", value) @property @pulumi.getter(name="maxRunDuration") def max_run_duration(self) -> Optional[pulumi.Input['DurationArgs']]: """ Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration. """ return pulumi.get(self, "max_run_duration") @max_run_duration.setter def max_run_duration(self, value: Optional[pulumi.Input['DurationArgs']]): pulumi.set(self, "max_run_duration", value) @property @pulumi.getter(name="minNodeCpus") def min_node_cpus(self) -> Optional[pulumi.Input[int]]: """ The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. """ return pulumi.get(self, "min_node_cpus") @min_node_cpus.setter def min_node_cpus(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_node_cpus", value) @property @pulumi.getter(name="nodeAffinities") def node_affinities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]]: """ A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. """ return pulumi.get(self, "node_affinities") @node_affinities.setter def node_affinities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]]]): pulumi.set(self, "node_affinities", value) @property @pulumi.getter(name="onHostMaintenance") def on_host_maintenance(self) -> Optional[pulumi.Input['SchedulingOnHostMaintenance']]: """ Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options. 
""" return pulumi.get(self, "on_host_maintenance") @on_host_maintenance.setter def on_host_maintenance(self, value: Optional[pulumi.Input['SchedulingOnHostMaintenance']]): pulumi.set(self, "on_host_maintenance", value) @property @pulumi.getter def preemptible(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. """ return pulumi.get(self, "preemptible") @preemptible.setter def preemptible(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "preemptible", value) @property @pulumi.getter(name="provisioningModel") def provisioning_model(self) -> Optional[pulumi.Input['SchedulingProvisioningModel']]: """ Specifies the provisioning model of the instance. """ return pulumi.get(self, "provisioning_model") @provisioning_model.setter def provisioning_model(self, value: Optional[pulumi.Input['SchedulingProvisioningModel']]): pulumi.set(self, "provisioning_model", value) @property @pulumi.getter(name="terminationTime") def termination_time(self) -> Optional[pulumi.Input[str]]: """ Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. """ return pulumi.get(self, "termination_time") @termination_time.setter def termination_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "termination_time", value) @pulumi.input_type class SdsConfigArgs: def __init__(__self__, *, grpc_service_config: Optional[pulumi.Input['GrpcServiceConfigArgs']] = None): """ [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. :param pulumi.Input['GrpcServiceConfigArgs'] grpc_service_config: The configuration to access the SDS server over GRPC. """ if grpc_service_config is not None: pulumi.set(__self__, "grpc_service_config", grpc_service_config) @property @pulumi.getter(name="grpcServiceConfig") def grpc_service_config(self) -> Optional[pulumi.Input['GrpcServiceConfigArgs']]: """ The configuration to access the SDS server over GRPC. """ return pulumi.get(self, "grpc_service_config") @grpc_service_config.setter def grpc_service_config(self, value: Optional[pulumi.Input['GrpcServiceConfigArgs']]): pulumi.set(self, "grpc_service_config", value) @pulumi.input_type class SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs: def __init__(__self__, *, enable: Optional[pulumi.Input[bool]] = None, rule_visibility: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']] = None): """ Configuration options for L7 DDoS detection. :param pulumi.Input[bool] enable: If set to true, enables CAAP for L7 DDoS detection. :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility'] rule_visibility: Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. """ if enable is not None: pulumi.set(__self__, "enable", enable) if rule_visibility is not None: pulumi.set(__self__, "rule_visibility", rule_visibility) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ If set to true, enables CAAP for L7 DDoS detection. 
""" return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="ruleVisibility") def rule_visibility(self) -> Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']]: """ Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. """ return pulumi.get(self, "rule_visibility") @rule_visibility.setter def rule_visibility(self, value: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility']]): pulumi.set(self, "rule_visibility", value) @pulumi.input_type class SecurityPolicyAdaptiveProtectionConfigArgs: def __init__(__self__, *, layer7_ddos_defense_config: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']] = None): """ Configuration options for Cloud Armor Adaptive Protection (CAAP). :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs'] layer7_ddos_defense_config: If set to true, enables Cloud Armor Machine Learning. """ if layer7_ddos_defense_config is not None: pulumi.set(__self__, "layer7_ddos_defense_config", layer7_ddos_defense_config) @property @pulumi.getter(name="layer7DdosDefenseConfig") def layer7_ddos_defense_config(self) -> Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']]: """ If set to true, enables Cloud Armor Machine Learning. """ return pulumi.get(self, "layer7_ddos_defense_config") @layer7_ddos_defense_config.setter def layer7_ddos_defense_config(self, value: Optional[pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs']]): pulumi.set(self, "layer7_ddos_defense_config", value) @pulumi.input_type class SecurityPolicyAdvancedOptionsConfigArgs: def __init__(__self__, *, json_parsing: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']] = None, log_level: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']] = None): if json_parsing is not None: pulumi.set(__self__, "json_parsing", json_parsing) if log_level is not None: pulumi.set(__self__, "log_level", log_level) @property @pulumi.getter(name="jsonParsing") def json_parsing(self) -> Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']]: return pulumi.get(self, "json_parsing") @json_parsing.setter def json_parsing(self, value: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigJsonParsing']]): pulumi.set(self, "json_parsing", value) @property @pulumi.getter(name="logLevel") def log_level(self) -> Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']]: return pulumi.get(self, "log_level") @log_level.setter def log_level(self, value: Optional[pulumi.Input['SecurityPolicyAdvancedOptionsConfigLogLevel']]): pulumi.set(self, "log_level", value) @pulumi.input_type class SecurityPolicyAssociationArgs: def __init__(__self__, *, attachment_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] attachment_id: The resource that the security policy is attached to. :param pulumi.Input[str] name: The name for an association. """ if attachment_id is not None: pulumi.set(__self__, "attachment_id", attachment_id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="attachmentId") def attachment_id(self) -> Optional[pulumi.Input[str]]: """ The resource that the security policy is attached to. 
""" return pulumi.get(self, "attachment_id") @attachment_id.setter def attachment_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "attachment_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name for an association. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class SecurityPolicyCloudArmorConfigArgs: def __init__(__self__, *, enable_ml: Optional[pulumi.Input[bool]] = None): """ Configuration options for Cloud Armor. :param pulumi.Input[bool] enable_ml: If set to true, enables Cloud Armor Machine Learning. """ if enable_ml is not None: pulumi.set(__self__, "enable_ml", enable_ml) @property @pulumi.getter(name="enableMl") def enable_ml(self) -> Optional[pulumi.Input[bool]]: """ If set to true, enables Cloud Armor Machine Learning. """ return pulumi.get(self, "enable_ml") @enable_ml.setter def enable_ml(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_ml", value) @pulumi.input_type class SecurityPolicyDdosProtectionConfigArgs: def __init__(__self__, *, ddos_protection: Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']] = None): if ddos_protection is not None: pulumi.set(__self__, "ddos_protection", ddos_protection) @property @pulumi.getter(name="ddosProtection") def ddos_protection(self) -> Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']]: return pulumi.get(self, "ddos_protection") @ddos_protection.setter def ddos_protection(self, value: Optional[pulumi.Input['SecurityPolicyDdosProtectionConfigDdosProtection']]): pulumi.set(self, "ddos_protection", value) @pulumi.input_type class SecurityPolicyRecaptchaOptionsConfigArgs: def __init__(__self__, *, redirect_site_key: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] redirect_site_key: An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. """ if redirect_site_key is not None: pulumi.set(__self__, "redirect_site_key", redirect_site_key) @property @pulumi.getter(name="redirectSiteKey") def redirect_site_key(self) -> Optional[pulumi.Input[str]]: """ An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. """ return pulumi.get(self, "redirect_site_key") @redirect_site_key.setter def redirect_site_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redirect_site_key", value) @pulumi.input_type class SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs: def __init__(__self__, *, header_name: Optional[pulumi.Input[str]] = None, header_value: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] header_name: The name of the header to set. :param pulumi.Input[str] header_value: The value to set the named header to. 
""" if header_name is not None: pulumi.set(__self__, "header_name", header_name) if header_value is not None: pulumi.set(__self__, "header_value", header_value) @property @pulumi.getter(name="headerName") def header_name(self) -> Optional[pulumi.Input[str]]: """ The name of the header to set. """ return pulumi.get(self, "header_name") @header_name.setter def header_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_name", value) @property @pulumi.getter(name="headerValue") def header_value(self) -> Optional[pulumi.Input[str]]: """ The value to set the named header to. """ return pulumi.get(self, "header_value") @header_value.setter def header_value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "header_value", value) @pulumi.input_type class SecurityPolicyRuleHttpHeaderActionArgs: def __init__(__self__, *, request_headers_to_adds: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]] request_headers_to_adds: The list of request headers to add or overwrite if they're already present. """ if request_headers_to_adds is not None: pulumi.set(__self__, "request_headers_to_adds", request_headers_to_adds) @property @pulumi.getter(name="requestHeadersToAdds") def request_headers_to_adds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]]: """ The list of request headers to add or overwrite if they're already present. """ return pulumi.get(self, "request_headers_to_adds") @request_headers_to_adds.setter def request_headers_to_adds(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]]]): pulumi.set(self, "request_headers_to_adds", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigDestinationPortArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. 
""" return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigLayer4ConfigArgs: def __init__(__self__, *, ip_protocol: Optional[pulumi.Input[str]] = None, ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. """ if ip_protocol is not None: pulumi.set(__self__, "ip_protocol", ip_protocol) if ports is not None: pulumi.set(__self__, "ports", ports) @property @pulumi.getter(name="ipProtocol") def ip_protocol(self) -> Optional[pulumi.Input[str]]: """ The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. """ return pulumi.get(self, "ip_protocol") @ip_protocol.setter def ip_protocol(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_protocol", value) @property @pulumi.getter def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. 
""" return pulumi.get(self, "ports") @ports.setter def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "ports", value) @pulumi.input_type class SecurityPolicyRuleMatcherConfigArgs: def __init__(__self__, *, dest_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dest_ports: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]] = None, layer4_configs: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]] = None, src_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]] dest_ports: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. """ if dest_ip_ranges is not None: pulumi.set(__self__, "dest_ip_ranges", dest_ip_ranges) if dest_ports is not None: pulumi.set(__self__, "dest_ports", dest_ports) if layer4_configs is not None: pulumi.set(__self__, "layer4_configs", layer4_configs) if src_ip_ranges is not None: pulumi.set(__self__, "src_ip_ranges", src_ip_ranges) @property @pulumi.getter(name="destIpRanges") def dest_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "dest_ip_ranges") @dest_ip_ranges.setter def dest_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dest_ip_ranges", value) @property @pulumi.getter(name="destPorts") def dest_ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "dest_ports") @dest_ports.setter def dest_ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]]]): pulumi.set(self, "dest_ports", value) @property @pulumi.getter(name="layer4Configs") def layer4_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]]: """ Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "layer4_configs") @layer4_configs.setter def layer4_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]]]): pulumi.set(self, "layer4_configs", value) @property @pulumi.getter(name="srcIpRanges") def src_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. 
""" return pulumi.get(self, "src_ip_ranges") @src_ip_ranges.setter def src_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "src_ip_ranges", value) @pulumi.input_type class SecurityPolicyRuleMatcherArgs: def __init__(__self__, *, config: Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']] = None, expr: Optional[pulumi.Input['ExprArgs']] = None, versioned_expr: Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']] = None): """ Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input['SecurityPolicyRuleMatcherConfigArgs'] config: The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. :param pulumi.Input['ExprArgs'] expr: User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. :param pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr'] versioned_expr: Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. """ if config is not None: pulumi.set(__self__, "config", config) if expr is not None: pulumi.set(__self__, "expr", expr) if versioned_expr is not None: pulumi.set(__self__, "versioned_expr", versioned_expr) @property @pulumi.getter def config(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']]: """ The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. """ return pulumi.get(self, "config") @config.setter def config(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherConfigArgs']]): pulumi.set(self, "config", value) @property @pulumi.getter def expr(self) -> Optional[pulumi.Input['ExprArgs']]: """ User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. """ return pulumi.get(self, "expr") @expr.setter def expr(self, value: Optional[pulumi.Input['ExprArgs']]): pulumi.set(self, "expr", value) @property @pulumi.getter(name="versionedExpr") def versioned_expr(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']]: """ Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. """ return pulumi.get(self, "versioned_expr") @versioned_expr.setter def versioned_expr(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr']]): pulumi.set(self, "versioned_expr", value) @pulumi.input_type class SecurityPolicyRuleRateLimitOptionsThresholdArgs: def __init__(__self__, *, count: Optional[pulumi.Input[int]] = None, interval_sec: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] count: Number of HTTP(S) requests for calculating the threshold. :param pulumi.Input[int] interval_sec: Interval over which the threshold is computed. 
""" if count is not None: pulumi.set(__self__, "count", count) if interval_sec is not None: pulumi.set(__self__, "interval_sec", interval_sec) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[int]]: """ Number of HTTP(S) requests for calculating the threshold. """ return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "count", value) @property @pulumi.getter(name="intervalSec") def interval_sec(self) -> Optional[pulumi.Input[int]]: """ Interval over which the threshold is computed. """ return pulumi.get(self, "interval_sec") @interval_sec.setter def interval_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "interval_sec", value) @pulumi.input_type class SecurityPolicyRuleRateLimitOptionsArgs: def __init__(__self__, *, ban_duration_sec: Optional[pulumi.Input[int]] = None, ban_threshold: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']] = None, conform_action: Optional[pulumi.Input[str]] = None, enforce_on_key: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']] = None, enforce_on_key_name: Optional[pulumi.Input[str]] = None, exceed_action: Optional[pulumi.Input[str]] = None, exceed_redirect_options: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']] = None, rate_limit_threshold: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']] = None): """ :param pulumi.Input[int] ban_duration_sec: Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] ban_threshold: Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'. :param pulumi.Input[str] conform_action: Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey'] enforce_on_key: Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. :param pulumi.Input[str] enforce_on_key_name: Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. 
HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. :param pulumi.Input[str] exceed_action: Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceed_redirect_options below. :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] exceed_redirect_options: Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] rate_limit_threshold: Threshold at which to begin ratelimiting. """ if ban_duration_sec is not None: pulumi.set(__self__, "ban_duration_sec", ban_duration_sec) if ban_threshold is not None: pulumi.set(__self__, "ban_threshold", ban_threshold) if conform_action is not None: pulumi.set(__self__, "conform_action", conform_action) if enforce_on_key is not None: pulumi.set(__self__, "enforce_on_key", enforce_on_key) if enforce_on_key_name is not None: pulumi.set(__self__, "enforce_on_key_name", enforce_on_key_name) if exceed_action is not None: pulumi.set(__self__, "exceed_action", exceed_action) if exceed_redirect_options is not None: pulumi.set(__self__, "exceed_redirect_options", exceed_redirect_options) if rate_limit_threshold is not None: pulumi.set(__self__, "rate_limit_threshold", rate_limit_threshold) @property @pulumi.getter(name="banDurationSec") def ban_duration_sec(self) -> Optional[pulumi.Input[int]]: """ Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. """ return pulumi.get(self, "ban_duration_sec") @ban_duration_sec.setter def ban_duration_sec(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ban_duration_sec", value) @property @pulumi.getter(name="banThreshold") def ban_threshold(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]: """ Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'. """ return pulumi.get(self, "ban_threshold") @ban_threshold.setter def ban_threshold(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]): pulumi.set(self, "ban_threshold", value) @property @pulumi.getter(name="conformAction") def conform_action(self) -> Optional[pulumi.Input[str]]: """ Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. """ return pulumi.get(self, "conform_action") @conform_action.setter def conform_action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "conform_action", value) @property @pulumi.getter(name="enforceOnKey") def enforce_on_key(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']]: """ Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. 
Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. """ return pulumi.get(self, "enforce_on_key") @enforce_on_key.setter def enforce_on_key(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey']]): pulumi.set(self, "enforce_on_key", value) @property @pulumi.getter(name="enforceOnKeyName") def enforce_on_key_name(self) -> Optional[pulumi.Input[str]]: """ Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. """ return pulumi.get(self, "enforce_on_key_name") @enforce_on_key_name.setter def enforce_on_key_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "enforce_on_key_name", value) @property @pulumi.getter(name="exceedAction") def exceed_action(self) -> Optional[pulumi.Input[str]]: """ Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceed_redirect_options below. """ return pulumi.get(self, "exceed_action") @exceed_action.setter def exceed_action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "exceed_action", value) @property @pulumi.getter(name="exceedRedirectOptions") def exceed_redirect_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]: """ Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. """ return pulumi.get(self, "exceed_redirect_options") @exceed_redirect_options.setter def exceed_redirect_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]): pulumi.set(self, "exceed_redirect_options", value) @property @pulumi.getter(name="rateLimitThreshold") def rate_limit_threshold(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]: """ Threshold at which to begin ratelimiting. """ return pulumi.get(self, "rate_limit_threshold") @rate_limit_threshold.setter def rate_limit_threshold(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs']]): pulumi.set(self, "rate_limit_threshold", value) @pulumi.input_type class SecurityPolicyRuleRedirectOptionsArgs: def __init__(__self__, *, target: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']] = None): """ :param pulumi.Input[str] target: Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. 
:param pulumi.Input['SecurityPolicyRuleRedirectOptionsType'] type: Type of the redirect action. """ if target is not None: pulumi.set(__self__, "target", target) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def target(self) -> Optional[pulumi.Input[str]]: """ Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. """ return pulumi.get(self, "target") @target.setter def target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']]: """ Type of the redirect action. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsType']]): pulumi.set(self, "type", value) @pulumi.input_type class SecurityPolicyRuleArgs: def __init__(__self__, *, action: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, direction: Optional[pulumi.Input['SecurityPolicyRuleDirection']] = None, enable_logging: Optional[pulumi.Input[bool]] = None, header_action: Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']] = None, match: Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']] = None, preview: Optional[pulumi.Input[bool]] = None, priority: Optional[pulumi.Input[int]] = None, rate_limit_options: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']] = None, redirect_options: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']] = None, redirect_target: Optional[pulumi.Input[str]] = None, rule_number: Optional[pulumi.Input[str]] = None, target_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, target_service_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input['SecurityPolicyRuleDirection'] direction: The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. 
This field may only be specified when the versioned_expr is set to FIREWALL. :param pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs'] header_action: Optional, additional actions that are performed on headers. :param pulumi.Input['SecurityPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. :param pulumi.Input[bool] preview: If set to true, the specified action is not enforced. :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs'] rate_limit_options: Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] redirect_options: Parameters defining the redirect action. Cannot be specified for any other actions. :param pulumi.Input[str] redirect_target: This must be specified for redirect actions. Cannot be specified for any other actions. :param pulumi.Input[str] rule_number: Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances that are applied with this rule. """ if action is not None: pulumi.set(__self__, "action", action) if description is not None: pulumi.set(__self__, "description", description) if direction is not None: pulumi.set(__self__, "direction", direction) if enable_logging is not None: pulumi.set(__self__, "enable_logging", enable_logging) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if match is not None: pulumi.set(__self__, "match", match) if preview is not None: pulumi.set(__self__, "preview", preview) if priority is not None: pulumi.set(__self__, "priority", priority) if rate_limit_options is not None: pulumi.set(__self__, "rate_limit_options", rate_limit_options) if redirect_options is not None: pulumi.set(__self__, "redirect_options", redirect_options) if redirect_target is not None: pulumi.set(__self__, "redirect_target", redirect_target) if rule_number is not None: pulumi.set(__self__, "rule_number", rule_number) if target_resources is not None: pulumi.set(__self__, "target_resources", target_resources) if target_service_accounts is not None: pulumi.set(__self__, "target_service_accounts", target_service_accounts) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). 
- rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. Provide this property when you create the resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def direction(self) -> Optional[pulumi.Input['SecurityPolicyRuleDirection']]: """ The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "direction") @direction.setter def direction(self, value: Optional[pulumi.Input['SecurityPolicyRuleDirection']]): pulumi.set(self, "direction", value) @property @pulumi.getter(name="enableLogging") def enable_logging(self) -> Optional[pulumi.Input[bool]]: """ Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. This field may only be specified when the versioned_expr is set to FIREWALL. """ return pulumi.get(self, "enable_logging") @enable_logging.setter def enable_logging(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_logging", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']]: """ Optional, additional actions that are performed on headers. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter def match(self) -> Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']]: """ A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. """ return pulumi.get(self, "match") @match.setter def match(self, value: Optional[pulumi.Input['SecurityPolicyRuleMatcherArgs']]): pulumi.set(self, "match", value) @property @pulumi.getter def preview(self) -> Optional[pulumi.Input[bool]]: """ If set to true, the specified action is not enforced. """ return pulumi.get(self, "preview") @preview.setter def preview(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "preview", value) @property @pulumi.getter def priority(self) -> Optional[pulumi.Input[int]]: """ An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. 
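        For example (illustrative values): a rule created with priority=0 is matched before a rule created with priority=1000 in the same security policy.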
""" return pulumi.get(self, "priority") @priority.setter def priority(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "priority", value) @property @pulumi.getter(name="rateLimitOptions") def rate_limit_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']]: """ Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. """ return pulumi.get(self, "rate_limit_options") @rate_limit_options.setter def rate_limit_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs']]): pulumi.set(self, "rate_limit_options", value) @property @pulumi.getter(name="redirectOptions") def redirect_options(self) -> Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]: """ Parameters defining the redirect action. Cannot be specified for any other actions. """ return pulumi.get(self, "redirect_options") @redirect_options.setter def redirect_options(self, value: Optional[pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs']]): pulumi.set(self, "redirect_options", value) @property @pulumi.getter(name="redirectTarget") def redirect_target(self) -> Optional[pulumi.Input[str]]: """ This must be specified for redirect actions. Cannot be specified for any other actions. """ return pulumi.get(self, "redirect_target") @redirect_target.setter def redirect_target(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "redirect_target", value) @property @pulumi.getter(name="ruleNumber") def rule_number(self) -> Optional[pulumi.Input[str]]: """ Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server. """ return pulumi.get(self, "rule_number") @rule_number.setter def rule_number(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rule_number", value) @property @pulumi.getter(name="targetResources") def target_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL. """ return pulumi.get(self, "target_resources") @target_resources.setter def target_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_resources", value) @property @pulumi.getter(name="targetServiceAccounts") def target_service_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of service accounts indicating the sets of instances that are applied with this rule. """ return pulumi.get(self, "target_service_accounts") @target_service_accounts.setter def target_service_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "target_service_accounts", value) @pulumi.input_type class SecuritySettingsArgs: def __init__(__self__, *, client_tls_policy: Optional[pulumi.Input[str]] = None, subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The authentication and authorization settings for a BackendService. :param pulumi.Input[str] client_tls_policy: Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. 
clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact. :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact. """ if client_tls_policy is not None: pulumi.set(__self__, "client_tls_policy", client_tls_policy) if subject_alt_names is not None: pulumi.set(__self__, "subject_alt_names", subject_alt_names) @property @pulumi.getter(name="clientTlsPolicy") def client_tls_policy(self) -> Optional[pulumi.Input[str]]: """ Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact. """ return pulumi.get(self, "client_tls_policy") @client_tls_policy.setter def client_tls_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "client_tls_policy", value) @property @pulumi.getter(name="subjectAltNames") def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.
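        For example (an illustrative, hypothetical value): subject_alt_names=["backend.internal.example.com"] lets the handshake proceed only if the server certificate's subjectAltName field contains that name.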
""" return pulumi.get(self, "subject_alt_names") @subject_alt_names.setter def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "subject_alt_names", value) @pulumi.input_type class ServerBindingArgs: def __init__(__self__, *, type: Optional[pulumi.Input['ServerBindingType']] = None): if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter def type(self) -> Optional[pulumi.Input['ServerBindingType']]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input['ServerBindingType']]): pulumi.set(self, "type", value) @pulumi.input_type class ServerTlsSettingsArgs: def __init__(__self__, *, proxy_tls_context: Optional[pulumi.Input['TlsContextArgs']] = None, subject_alt_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tls_mode: Optional[pulumi.Input['ServerTlsSettingsTlsMode']] = None): """ The TLS settings for the server. :param pulumi.Input['TlsContextArgs'] proxy_tls_context: Configures the mechanism to obtain security certificates and identity information. :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: A list of alternate names to verify the subject identity in the certificate presented by the client. :param pulumi.Input['ServerTlsSettingsTlsMode'] tls_mode: Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. """ if proxy_tls_context is not None: pulumi.set(__self__, "proxy_tls_context", proxy_tls_context) if subject_alt_names is not None: pulumi.set(__self__, "subject_alt_names", subject_alt_names) if tls_mode is not None: pulumi.set(__self__, "tls_mode", tls_mode) @property @pulumi.getter(name="proxyTlsContext") def proxy_tls_context(self) -> Optional[pulumi.Input['TlsContextArgs']]: """ Configures the mechanism to obtain security certificates and identity information. """ return pulumi.get(self, "proxy_tls_context") @proxy_tls_context.setter def proxy_tls_context(self, value: Optional[pulumi.Input['TlsContextArgs']]): pulumi.set(self, "proxy_tls_context", value) @property @pulumi.getter(name="subjectAltNames") def subject_alt_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of alternate names to verify the subject identity in the certificate presented by the client. """ return pulumi.get(self, "subject_alt_names") @subject_alt_names.setter def subject_alt_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "subject_alt_names", value) @property @pulumi.getter(name="tlsMode") def tls_mode(self) -> Optional[pulumi.Input['ServerTlsSettingsTlsMode']]: """ Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication. 
""" return pulumi.get(self, "tls_mode") @tls_mode.setter def tls_mode(self, value: Optional[pulumi.Input['ServerTlsSettingsTlsMode']]): pulumi.set(self, "tls_mode", value) @pulumi.input_type class ServiceAccountArgs: def __init__(__self__, *, email: Optional[pulumi.Input[str]] = None, scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ A service account. :param pulumi.Input[str] email: Email address of the service account. :param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: The list of scopes to be made available for this service account. """ if email is not None: pulumi.set(__self__, "email", email) if scopes is not None: pulumi.set(__self__, "scopes", scopes) @property @pulumi.getter def email(self) -> Optional[pulumi.Input[str]]: """ Email address of the service account. """ return pulumi.get(self, "email") @email.setter def email(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "email", value) @property @pulumi.getter def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of scopes to be made available for this service account. """ return pulumi.get(self, "scopes") @scopes.setter def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "scopes", value) @pulumi.input_type class ServiceAttachmentConsumerProjectLimitArgs: def __init__(__self__, *, connection_limit: Optional[pulumi.Input[int]] = None, project_id_or_num: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] connection_limit: The value of the limit to set. :param pulumi.Input[str] project_id_or_num: The project id or number for the project to set the limit for. """ if connection_limit is not None: pulumi.set(__self__, "connection_limit", connection_limit) if project_id_or_num is not None: pulumi.set(__self__, "project_id_or_num", project_id_or_num) @property @pulumi.getter(name="connectionLimit") def connection_limit(self) -> Optional[pulumi.Input[int]]: """ The value of the limit to set. """ return pulumi.get(self, "connection_limit") @connection_limit.setter def connection_limit(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "connection_limit", value) @property @pulumi.getter(name="projectIdOrNum") def project_id_or_num(self) -> Optional[pulumi.Input[str]]: """ The project id or number for the project to set the limit for. """ return pulumi.get(self, "project_id_or_num") @project_id_or_num.setter def project_id_or_num(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project_id_or_num", value) @pulumi.input_type class ShareSettingsArgs: def __init__(__self__, *, folder_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, project_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, projects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, share_type: Optional[pulumi.Input['ShareSettingsShareType']] = None): """ The share setting for reservations and sole tenancy node groups. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] folder_map: A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] project_map: A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] projects: A List of Project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS. :param pulumi.Input['ShareSettingsShareType'] share_type: Type of sharing for this shared-reservation """ if folder_map is not None: pulumi.set(__self__, "folder_map", folder_map) if project_map is not None: pulumi.set(__self__, "project_map", project_map) if projects is not None: pulumi.set(__self__, "projects", projects) if share_type is not None: pulumi.set(__self__, "share_type", share_type) @property @pulumi.getter(name="folderMap") def folder_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. """ return pulumi.get(self, "folder_map") @folder_map.setter def folder_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "folder_map", value) @property @pulumi.getter(name="projectMap") def project_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. """ return pulumi.get(self, "project_map") @project_map.setter def project_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "project_map", value) @property @pulumi.getter def projects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A List of Project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS. """ return pulumi.get(self, "projects") @projects.setter def projects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "projects", value) @property @pulumi.getter(name="shareType") def share_type(self) -> Optional[pulumi.Input['ShareSettingsShareType']]: """ Type of sharing for this shared-reservation """ return pulumi.get(self, "share_type") @share_type.setter def share_type(self, value: Optional[pulumi.Input['ShareSettingsShareType']]): pulumi.set(self, "share_type", value) @pulumi.input_type class ShieldedInstanceConfigArgs: def __init__(__self__, *, enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None, enable_secure_boot: Optional[pulumi.Input[bool]] = None, enable_vtpm: Optional[pulumi.Input[bool]] = None): """ A set of Shielded Instance options. :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. Enabled by default. :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Disabled by default. :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. Enabled by default. """ if enable_integrity_monitoring is not None: pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring) if enable_secure_boot is not None: pulumi.set(__self__, "enable_secure_boot", enable_secure_boot) if enable_vtpm is not None: pulumi.set(__self__, "enable_vtpm", enable_vtpm) @property @pulumi.getter(name="enableIntegrityMonitoring") def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has integrity monitoring enabled. Enabled by default. 
""" return pulumi.get(self, "enable_integrity_monitoring") @enable_integrity_monitoring.setter def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_integrity_monitoring", value) @property @pulumi.getter(name="enableSecureBoot") def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Secure Boot enabled. Disabled by default. """ return pulumi.get(self, "enable_secure_boot") @enable_secure_boot.setter def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_secure_boot", value) @property @pulumi.getter(name="enableVtpm") def enable_vtpm(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has the vTPM enabled. Enabled by default. """ return pulumi.get(self, "enable_vtpm") @enable_vtpm.setter def enable_vtpm(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_vtpm", value) @pulumi.input_type class ShieldedInstanceIntegrityPolicyArgs: def __init__(__self__, *, update_auto_learn_policy: Optional[pulumi.Input[bool]] = None): """ The policy describes the baseline against which Instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ if update_auto_learn_policy is not None: pulumi.set(__self__, "update_auto_learn_policy", update_auto_learn_policy) @property @pulumi.getter(name="updateAutoLearnPolicy") def update_auto_learn_policy(self) -> Optional[pulumi.Input[bool]]: """ Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ return pulumi.get(self, "update_auto_learn_policy") @update_auto_learn_policy.setter def update_auto_learn_policy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "update_auto_learn_policy", value) @pulumi.input_type class ShieldedVmConfigArgs: def __init__(__self__, *, enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None, enable_secure_boot: Optional[pulumi.Input[bool]] = None, enable_vtpm: Optional[pulumi.Input[bool]] = None): """ A set of Shielded VM options. :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. """ if enable_integrity_monitoring is not None: pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring) if enable_secure_boot is not None: pulumi.set(__self__, "enable_secure_boot", enable_secure_boot) if enable_vtpm is not None: pulumi.set(__self__, "enable_vtpm", enable_vtpm) @property @pulumi.getter(name="enableIntegrityMonitoring") def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has integrity monitoring enabled. """ return pulumi.get(self, "enable_integrity_monitoring") @enable_integrity_monitoring.setter def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_integrity_monitoring", value) @property @pulumi.getter(name="enableSecureBoot") def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has Secure Boot enabled. 
""" return pulumi.get(self, "enable_secure_boot") @enable_secure_boot.setter def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_secure_boot", value) @property @pulumi.getter(name="enableVtpm") def enable_vtpm(self) -> Optional[pulumi.Input[bool]]: """ Defines whether the instance has the vTPM enabled. """ return pulumi.get(self, "enable_vtpm") @enable_vtpm.setter def enable_vtpm(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_vtpm", value) @pulumi.input_type class ShieldedVmIntegrityPolicyArgs: def __init__(__self__, *, update_auto_learn_policy: Optional[pulumi.Input[bool]] = None): """ The policy describes the baseline against which VM instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ if update_auto_learn_policy is not None: pulumi.set(__self__, "update_auto_learn_policy", update_auto_learn_policy) @property @pulumi.getter(name="updateAutoLearnPolicy") def update_auto_learn_policy(self) -> Optional[pulumi.Input[bool]]: """ Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. """ return pulumi.get(self, "update_auto_learn_policy") @update_auto_learn_policy.setter def update_auto_learn_policy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "update_auto_learn_policy", value) @pulumi.input_type class SourceDiskEncryptionKeyArgs: def __init__(__self__, *, disk_encryption_key: Optional[pulumi.Input['CustomerEncryptionKeyArgs']] = None, source_disk: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. :param pulumi.Input[str] source_disk: URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk """ if disk_encryption_key is not None: pulumi.set(__self__, "disk_encryption_key", disk_encryption_key) if source_disk is not None: pulumi.set(__self__, "source_disk", source_disk) @property @pulumi.getter(name="diskEncryptionKey") def disk_encryption_key(self) -> Optional[pulumi.Input['CustomerEncryptionKeyArgs']]: """ The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. """ return pulumi.get(self, "disk_encryption_key") @disk_encryption_key.setter def disk_encryption_key(self, value: Optional[pulumi.Input['CustomerEncryptionKeyArgs']]): pulumi.set(self, "disk_encryption_key", value) @property @pulumi.getter(name="sourceDisk") def source_disk(self) -> Optional[pulumi.Input[str]]: """ URL of the disk attached to the source instance. This can be a full or valid partial URL. 
For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk """ return pulumi.get(self, "source_disk") @source_disk.setter def source_disk(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "source_disk", value) @pulumi.input_type class SourceInstanceParamsArgs: def __init__(__self__, *, disk_configs: Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]] = None): """ A specification of the parameters to use when creating the instance template from a source instance. :param pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]] disk_configs: Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. """ if disk_configs is not None: pulumi.set(__self__, "disk_configs", disk_configs) @property @pulumi.getter(name="diskConfigs") def disk_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]]: """ Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. """ return pulumi.get(self, "disk_configs") @disk_configs.setter def disk_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]]]): pulumi.set(self, "disk_configs", value) @pulumi.input_type class SslCertificateManagedSslCertificateArgs: def __init__(__self__, *, domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Configuration and status of a managed SSL certificate. :param pulumi.Input[Sequence[pulumi.Input[str]]] domains: The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). """ if domains is not None: pulumi.set(__self__, "domains", domains) @property @pulumi.getter def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). """ return pulumi.get(self, "domains") @domains.setter def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "domains", value) @pulumi.input_type class SslCertificateSelfManagedSslCertificateArgs: def __init__(__self__, *, certificate: Optional[pulumi.Input[str]] = None, private_key: Optional[pulumi.Input[str]] = None): """ Configuration and status of a self-managed SSL certificate. :param pulumi.Input[str] certificate: A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. :param pulumi.Input[str] private_key: A write-only private key in PEM format. Only insert requests will include this field. 
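        Example (a minimal sketch; the file paths are hypothetical and are assumed to already contain PEM data):

            with open("server.crt") as crt, open("server.key") as key:
                self_managed = SslCertificateSelfManagedSslCertificateArgs(
                    certificate=crt.read(),  # PEM certificate chain (at most 5 certs)
                    private_key=key.read(),  # write-only PEM private key
                )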
""" if certificate is not None: pulumi.set(__self__, "certificate", certificate) if private_key is not None: pulumi.set(__self__, "private_key", private_key) @property @pulumi.getter def certificate(self) -> Optional[pulumi.Input[str]]: """ A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. """ return pulumi.get(self, "certificate") @certificate.setter def certificate(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate", value) @property @pulumi.getter(name="privateKey") def private_key(self) -> Optional[pulumi.Input[str]]: """ A write-only private key in PEM format. Only insert requests will include this field. """ return pulumi.get(self, "private_key") @private_key.setter def private_key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_key", value) @pulumi.input_type class StatefulPolicyPreservedStateArgs: def __init__(__self__, *, disks: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, external_ips: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, internal_ips: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Configuration of preserved resources. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] disks: Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] external_ips: External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] internal_ips: Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. """ if disks is not None: pulumi.set(__self__, "disks", disks) if external_ips is not None: pulumi.set(__self__, "external_ips", external_ips) if internal_ips is not None: pulumi.set(__self__, "internal_ips", internal_ips) @property @pulumi.getter def disks(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. """ return pulumi.get(self, "disks") @disks.setter def disks(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "disks", value) @property @pulumi.getter(name="externalIPs") def external_ips(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. """ return pulumi.get(self, "external_ips") @external_ips.setter def external_ips(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "external_ips", value) @property @pulumi.getter(name="internalIPs") def internal_ips(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. 
""" return pulumi.get(self, "internal_ips") @internal_ips.setter def internal_ips(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "internal_ips", value) @pulumi.input_type class StatefulPolicyArgs: def __init__(__self__, *, preserved_state: Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']] = None): if preserved_state is not None: pulumi.set(__self__, "preserved_state", preserved_state) @property @pulumi.getter(name="preservedState") def preserved_state(self) -> Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']]: return pulumi.get(self, "preserved_state") @preserved_state.setter def preserved_state(self, value: Optional[pulumi.Input['StatefulPolicyPreservedStateArgs']]): pulumi.set(self, "preserved_state", value) @pulumi.input_type class SubnetworkLogConfigArgs: def __init__(__self__, *, aggregation_interval: Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']] = None, enable: Optional[pulumi.Input[bool]] = None, filter_expr: Optional[pulumi.Input[str]] = None, flow_sampling: Optional[pulumi.Input[float]] = None, metadata: Optional[pulumi.Input['SubnetworkLogConfigMetadata']] = None, metadata_fields: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ The available logging options for this subnetwork. :param pulumi.Input['SubnetworkLogConfigAggregationInterval'] aggregation_interval: Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. :param pulumi.Input[bool] enable: Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. :param pulumi.Input[str] filter_expr: Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. :param pulumi.Input[float] flow_sampling: Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. :param pulumi.Input['SubnetworkLogConfigMetadata'] metadata: Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. :param pulumi.Input[Sequence[pulumi.Input[str]]] metadata_fields: Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. 
""" if aggregation_interval is not None: pulumi.set(__self__, "aggregation_interval", aggregation_interval) if enable is not None: pulumi.set(__self__, "enable", enable) if filter_expr is not None: pulumi.set(__self__, "filter_expr", filter_expr) if flow_sampling is not None: pulumi.set(__self__, "flow_sampling", flow_sampling) if metadata is not None: pulumi.set(__self__, "metadata", metadata) if metadata_fields is not None: pulumi.set(__self__, "metadata_fields", metadata_fields) @property @pulumi.getter(name="aggregationInterval") def aggregation_interval(self) -> Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']]: """ Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. """ return pulumi.get(self, "aggregation_interval") @aggregation_interval.setter def aggregation_interval(self, value: Optional[pulumi.Input['SubnetworkLogConfigAggregationInterval']]): pulumi.set(self, "aggregation_interval", value) @property @pulumi.getter def enable(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. """ return pulumi.get(self, "enable") @enable.setter def enable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable", value) @property @pulumi.getter(name="filterExpr") def filter_expr(self) -> Optional[pulumi.Input[str]]: """ Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. """ return pulumi.get(self, "filter_expr") @filter_expr.setter def filter_expr(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "filter_expr", value) @property @pulumi.getter(name="flowSampling") def flow_sampling(self) -> Optional[pulumi.Input[float]]: """ Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. """ return pulumi.get(self, "flow_sampling") @flow_sampling.setter def flow_sampling(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "flow_sampling", value) @property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['SubnetworkLogConfigMetadata']]: """ Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: Optional[pulumi.Input['SubnetworkLogConfigMetadata']]): pulumi.set(self, "metadata", value) @property @pulumi.getter(name="metadataFields") def metadata_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. 
""" return pulumi.get(self, "metadata_fields") @metadata_fields.setter def metadata_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "metadata_fields", value) @pulumi.input_type class SubnetworkSecondaryRangeArgs: def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]] = None, range_name: Optional[pulumi.Input[str]] = None, reserved_internal_range: Optional[pulumi.Input[str]] = None): """ Represents a secondary IP range of a subnetwork. :param pulumi.Input[str] ip_cidr_range: The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. :param pulumi.Input[str] range_name: The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. :param pulumi.Input[str] reserved_internal_range: The URL of the reserved internal range. """ if ip_cidr_range is not None: pulumi.set(__self__, "ip_cidr_range", ip_cidr_range) if range_name is not None: pulumi.set(__self__, "range_name", range_name) if reserved_internal_range is not None: pulumi.set(__self__, "reserved_internal_range", reserved_internal_range) @property @pulumi.getter(name="ipCidrRange") def ip_cidr_range(self) -> Optional[pulumi.Input[str]]: """ The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. """ return pulumi.get(self, "ip_cidr_range") @ip_cidr_range.setter def ip_cidr_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_cidr_range", value) @property @pulumi.getter(name="rangeName") def range_name(self) -> Optional[pulumi.Input[str]]: """ The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. """ return pulumi.get(self, "range_name") @range_name.setter def range_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "range_name", value) @property @pulumi.getter(name="reservedInternalRange") def reserved_internal_range(self) -> Optional[pulumi.Input[str]]: """ The URL of the reserved internal range. """ return pulumi.get(self, "reserved_internal_range") @reserved_internal_range.setter def reserved_internal_range(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reserved_internal_range", value) @pulumi.input_type class SubsettingArgs: def __init__(__self__, *, policy: Optional[pulumi.Input['SubsettingPolicy']] = None, subset_size: Optional[pulumi.Input[int]] = None): """ Subsetting configuration for this BackendService. Currently this is applicable only for Internal TCP/UDP load balancing, Internal HTTP(S) load balancing and Traffic Director. :param pulumi.Input[int] subset_size: The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. 
Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled. """ if policy is not None: pulumi.set(__self__, "policy", policy) if subset_size is not None: pulumi.set(__self__, "subset_size", subset_size) @property @pulumi.getter def policy(self) -> Optional[pulumi.Input['SubsettingPolicy']]: return pulumi.get(self, "policy") @policy.setter def policy(self, value: Optional[pulumi.Input['SubsettingPolicy']]): pulumi.set(self, "policy", value) @property @pulumi.getter(name="subsetSize") def subset_size(self) -> Optional[pulumi.Input[int]]: """ The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled. """ return pulumi.get(self, "subset_size") @subset_size.setter def subset_size(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "subset_size", value) @pulumi.input_type class TCPHealthCheckArgs: def __init__(__self__, *, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, port_specification: Optional[pulumi.Input['TCPHealthCheckPortSpecification']] = None, proxy_header: Optional[pulumi.Input['TCPHealthCheckProxyHeader']] = None, request: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['TCPHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields. :param pulumi.Input['TCPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request: The application data to send once the TCP connection has been established (default value is empty). 
If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. """ if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if port_specification is not None: pulumi.set(__self__, "port_specification", port_specification) if proxy_header is not None: pulumi.set(__self__, "proxy_header", proxy_header) if request is not None: pulumi.set(__self__, "request", request) if response is not None: pulumi.set(__self__, "response", response) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter(name="portSpecification") def port_specification(self) -> Optional[pulumi.Input['TCPHealthCheckPortSpecification']]: """ Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields. """ return pulumi.get(self, "port_specification") @port_specification.setter def port_specification(self, value: Optional[pulumi.Input['TCPHealthCheckPortSpecification']]): pulumi.set(self, "port_specification", value) @property @pulumi.getter(name="proxyHeader") def proxy_header(self) -> Optional[pulumi.Input['TCPHealthCheckProxyHeader']]: """ Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. """ return pulumi.get(self, "proxy_header") @proxy_header.setter def proxy_header(self, value: Optional[pulumi.Input['TCPHealthCheckProxyHeader']]): pulumi.set(self, "proxy_header", value) @property @pulumi.getter def request(self) -> Optional[pulumi.Input[str]]: """ The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. """ return pulumi.get(self, "request") @request.setter def request(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. 
""" return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class TagsArgs: def __init__(__self__, *, items: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ A set of instance tags. :param pulumi.Input[Sequence[pulumi.Input[str]]] items: An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. """ if items is not None: pulumi.set(__self__, "items", items) @property @pulumi.getter def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. """ return pulumi.get(self, "items") @items.setter def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "items", value) @pulumi.input_type class TlsCertificateContextArgs: def __init__(__self__, *, certificate_paths: Optional[pulumi.Input['TlsCertificatePathsArgs']] = None, certificate_source: Optional[pulumi.Input['TlsCertificateContextCertificateSource']] = None, sds_config: Optional[pulumi.Input['SdsConfigArgs']] = None): """ [Deprecated] Defines the mechanism to obtain the client or server certificate. Defines the mechanism to obtain the client or server certificate. :param pulumi.Input['TlsCertificatePathsArgs'] certificate_paths: Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH. :param pulumi.Input['TlsCertificateContextCertificateSource'] certificate_source: Defines how TLS certificates are obtained. :param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. """ if certificate_paths is not None: pulumi.set(__self__, "certificate_paths", certificate_paths) if certificate_source is not None: pulumi.set(__self__, "certificate_source", certificate_source) if sds_config is not None: pulumi.set(__self__, "sds_config", sds_config) @property @pulumi.getter(name="certificatePaths") def certificate_paths(self) -> Optional[pulumi.Input['TlsCertificatePathsArgs']]: """ Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH. """ return pulumi.get(self, "certificate_paths") @certificate_paths.setter def certificate_paths(self, value: Optional[pulumi.Input['TlsCertificatePathsArgs']]): pulumi.set(self, "certificate_paths", value) @property @pulumi.getter(name="certificateSource") def certificate_source(self) -> Optional[pulumi.Input['TlsCertificateContextCertificateSource']]: """ Defines how TLS certificates are obtained. """ return pulumi.get(self, "certificate_source") @certificate_source.setter def certificate_source(self, value: Optional[pulumi.Input['TlsCertificateContextCertificateSource']]): pulumi.set(self, "certificate_source", value) @property @pulumi.getter(name="sdsConfig") def sds_config(self) -> Optional[pulumi.Input['SdsConfigArgs']]: """ Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. 
""" return pulumi.get(self, "sds_config") @sds_config.setter def sds_config(self, value: Optional[pulumi.Input['SdsConfigArgs']]): pulumi.set(self, "sds_config", value) @pulumi.input_type class TlsCertificatePathsArgs: def __init__(__self__, *, certificate_path: Optional[pulumi.Input[str]] = None, private_key_path: Optional[pulumi.Input[str]] = None): """ [Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key. :param pulumi.Input[str] certificate_path: The path to the file holding the client or server TLS certificate to use. :param pulumi.Input[str] private_key_path: The path to the file holding the client or server private key. """ if certificate_path is not None: pulumi.set(__self__, "certificate_path", certificate_path) if private_key_path is not None: pulumi.set(__self__, "private_key_path", private_key_path) @property @pulumi.getter(name="certificatePath") def certificate_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the client or server TLS certificate to use. """ return pulumi.get(self, "certificate_path") @certificate_path.setter def certificate_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate_path", value) @property @pulumi.getter(name="privateKeyPath") def private_key_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the client or server private key. """ return pulumi.get(self, "private_key_path") @private_key_path.setter def private_key_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "private_key_path", value) @pulumi.input_type class TlsContextArgs: def __init__(__self__, *, certificate_context: Optional[pulumi.Input['TlsCertificateContextArgs']] = None, validation_context: Optional[pulumi.Input['TlsValidationContextArgs']] = None): """ [Deprecated] The TLS settings for the client or server. The TLS settings for the client or server. :param pulumi.Input['TlsCertificateContextArgs'] certificate_context: Defines the mechanism to obtain the client or server certificate. :param pulumi.Input['TlsValidationContextArgs'] validation_context: Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate. """ if certificate_context is not None: pulumi.set(__self__, "certificate_context", certificate_context) if validation_context is not None: pulumi.set(__self__, "validation_context", validation_context) @property @pulumi.getter(name="certificateContext") def certificate_context(self) -> Optional[pulumi.Input['TlsCertificateContextArgs']]: """ Defines the mechanism to obtain the client or server certificate. """ return pulumi.get(self, "certificate_context") @certificate_context.setter def certificate_context(self, value: Optional[pulumi.Input['TlsCertificateContextArgs']]): pulumi.set(self, "certificate_context", value) @property @pulumi.getter(name="validationContext") def validation_context(self) -> Optional[pulumi.Input['TlsValidationContextArgs']]: """ Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate. 
""" return pulumi.get(self, "validation_context") @validation_context.setter def validation_context(self, value: Optional[pulumi.Input['TlsValidationContextArgs']]): pulumi.set(self, "validation_context", value) @pulumi.input_type class TlsValidationContextArgs: def __init__(__self__, *, certificate_path: Optional[pulumi.Input[str]] = None, sds_config: Optional[pulumi.Input['SdsConfigArgs']] = None, validation_source: Optional[pulumi.Input['TlsValidationContextValidationSource']] = None): """ [Deprecated] Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. validate the client/server certificate. :param pulumi.Input[str] certificate_path: The path to the file holding the CA certificate to validate the client or server certificate. :param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. :param pulumi.Input['TlsValidationContextValidationSource'] validation_source: Defines how TLS certificates are obtained. """ if certificate_path is not None: pulumi.set(__self__, "certificate_path", certificate_path) if sds_config is not None: pulumi.set(__self__, "sds_config", sds_config) if validation_source is not None: pulumi.set(__self__, "validation_source", validation_source) @property @pulumi.getter(name="certificatePath") def certificate_path(self) -> Optional[pulumi.Input[str]]: """ The path to the file holding the CA certificate to validate the client or server certificate. """ return pulumi.get(self, "certificate_path") @certificate_path.setter def certificate_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "certificate_path", value) @property @pulumi.getter(name="sdsConfig") def sds_config(self) -> Optional[pulumi.Input['SdsConfigArgs']]: """ Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. """ return pulumi.get(self, "sds_config") @sds_config.setter def sds_config(self, value: Optional[pulumi.Input['SdsConfigArgs']]): pulumi.set(self, "sds_config", value) @property @pulumi.getter(name="validationSource") def validation_source(self) -> Optional[pulumi.Input['TlsValidationContextValidationSource']]: """ Defines how TLS certificates are obtained. """ return pulumi.get(self, "validation_source") @validation_source.setter def validation_source(self, value: Optional[pulumi.Input['TlsValidationContextValidationSource']]): pulumi.set(self, "validation_source", value) @pulumi.input_type class UDPHealthCheckArgs: def __init__(__self__, *, port: Optional[pulumi.Input[int]] = None, port_name: Optional[pulumi.Input[str]] = None, request: Optional[pulumi.Input[str]] = None, response: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[int] port: The UDP port number for the health check request. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input[str] request: Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. 
""" if port is not None: pulumi.set(__self__, "port", port) if port_name is not None: pulumi.set(__self__, "port_name", port_name) if request is not None: pulumi.set(__self__, "request", request) if response is not None: pulumi.set(__self__, "response", response) @property @pulumi.getter def port(self) -> Optional[pulumi.Input[int]]: """ The UDP port number for the health check request. Valid values are 1 through 65535. """ return pulumi.get(self, "port") @port.setter def port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "port", value) @property @pulumi.getter(name="portName") def port_name(self) -> Optional[pulumi.Input[str]]: """ Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. """ return pulumi.get(self, "port_name") @port_name.setter def port_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "port_name", value) @property @pulumi.getter def request(self) -> Optional[pulumi.Input[str]]: """ Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII. """ return pulumi.get(self, "request") @request.setter def request(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request", value) @property @pulumi.getter def response(self) -> Optional[pulumi.Input[str]]: """ The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. """ return pulumi.get(self, "response") @response.setter def response(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "response", value) @pulumi.input_type class UrlMapTestHeaderArgs: def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, value: Optional[pulumi.Input[str]] = None): """ HTTP headers used in UrlMapTests. :param pulumi.Input[str] name: Header name. :param pulumi.Input[str] value: Header value. """ if name is not None: pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Header name. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def value(self) -> Optional[pulumi.Input[str]]: """ Header value. """ return pulumi.get(self, "value") @value.setter def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @pulumi.input_type class UrlMapTestArgs: def __init__(__self__, *, backend_service_weight: Optional[pulumi.Input[int]] = None, description: Optional[pulumi.Input[str]] = None, expected_output_url: Optional[pulumi.Input[str]] = None, expected_redirect_response_code: Optional[pulumi.Input[int]] = None, headers: Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]] = None, host: Optional[pulumi.Input[str]] = None, path: Optional[pulumi.Input[str]] = None, service: Optional[pulumi.Input[str]] = None): """ Message for the expected URL mappings. :param pulumi.Input[int] backend_service_weight: The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. :param pulumi.Input[str] description: Description of this test case. :param pulumi.Input[str] expected_output_url: The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. 
For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. :param pulumi.Input[int] expected_redirect_response_code: For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. :param pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]] headers: HTTP headers for this request. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] host: Host portion of the URL. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] path: Path portion of the URL. :param pulumi.Input[str] service: Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. """ if backend_service_weight is not None: pulumi.set(__self__, "backend_service_weight", backend_service_weight) if description is not None: pulumi.set(__self__, "description", description) if expected_output_url is not None: pulumi.set(__self__, "expected_output_url", expected_output_url) if expected_redirect_response_code is not None: pulumi.set(__self__, "expected_redirect_response_code", expected_redirect_response_code) if headers is not None: pulumi.set(__self__, "headers", headers) if host is not None: pulumi.set(__self__, "host", host) if path is not None: pulumi.set(__self__, "path", path) if service is not None: pulumi.set(__self__, "service", service) @property @pulumi.getter(name="backendServiceWeight") def backend_service_weight(self) -> Optional[pulumi.Input[int]]: """ The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. """ return pulumi.get(self, "backend_service_weight") @backend_service_weight.setter def backend_service_weight(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "backend_service_weight", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Description of this test case. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="expectedOutputUrl") def expected_output_url(self) -> Optional[pulumi.Input[str]]: """ The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. 
When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. """ return pulumi.get(self, "expected_output_url") @expected_output_url.setter def expected_output_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "expected_output_url", value) @property @pulumi.getter(name="expectedRedirectResponseCode") def expected_redirect_response_code(self) -> Optional[pulumi.Input[int]]: """ For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. """ return pulumi.get(self, "expected_redirect_response_code") @expected_redirect_response_code.setter def expected_redirect_response_code(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "expected_redirect_response_code", value) @property @pulumi.getter def headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]]: """ HTTP headers for this request. If headers contains a host header, then host must also match the header value. """ return pulumi.get(self, "headers") @headers.setter def headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]]]): pulumi.set(self, "headers", value) @property @pulumi.getter def host(self) -> Optional[pulumi.Input[str]]: """ Host portion of the URL. If headers contains a host header, then host must also match the header value. """ return pulumi.get(self, "host") @host.setter def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter def path(self) -> Optional[pulumi.Input[str]]: """ Path portion of the URL. """ return pulumi.get(self, "path") @path.setter def path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path", value) @property @pulumi.getter def service(self) -> Optional[pulumi.Input[str]]: """ Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. """ return pulumi.get(self, "service") @service.setter def service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service", value) @pulumi.input_type class UrlRewriteArgs: def __init__(__self__, *, host_rewrite: Optional[pulumi.Input[str]] = None, path_prefix_rewrite: Optional[pulumi.Input[str]] = None): """ The spec for modifying the path before sending the request to the matched backend service. :param pulumi.Input[str] host_rewrite: Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. :param pulumi.Input[str] path_prefix_rewrite: Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. 
""" if host_rewrite is not None: pulumi.set(__self__, "host_rewrite", host_rewrite) if path_prefix_rewrite is not None: pulumi.set(__self__, "path_prefix_rewrite", path_prefix_rewrite) @property @pulumi.getter(name="hostRewrite") def host_rewrite(self) -> Optional[pulumi.Input[str]]: """ Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. """ return pulumi.get(self, "host_rewrite") @host_rewrite.setter def host_rewrite(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host_rewrite", value) @property @pulumi.getter(name="pathPrefixRewrite") def path_prefix_rewrite(self) -> Optional[pulumi.Input[str]]: """ Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. """ return pulumi.get(self, "path_prefix_rewrite") @path_prefix_rewrite.setter def path_prefix_rewrite(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "path_prefix_rewrite", value) @pulumi.input_type class VpnGatewayVpnGatewayInterfaceArgs: def __init__(__self__, *, interconnect_attachment: Optional[pulumi.Input[str]] = None): """ A VPN gateway interface. :param pulumi.Input[str] interconnect_attachment: URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. """ if interconnect_attachment is not None: pulumi.set(__self__, "interconnect_attachment", interconnect_attachment) @property @pulumi.getter(name="interconnectAttachment") def interconnect_attachment(self) -> Optional[pulumi.Input[str]]: """ URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. """ return pulumi.get(self, "interconnect_attachment") @interconnect_attachment.setter def interconnect_attachment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "interconnect_attachment", value) @pulumi.input_type class WeightedBackendServiceArgs: def __init__(__self__, *, backend_service: Optional[pulumi.Input[str]] = None, header_action: Optional[pulumi.Input['HttpHeaderActionArgs']] = None, weight: Optional[pulumi.Input[int]] = None): """ In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple backend services. The volume of traffic for each backend service is proportional to the weight specified in each WeightedBackendService :param pulumi.Input[str] backend_service: The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. 
headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] weight: Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000. """ if backend_service is not None: pulumi.set(__self__, "backend_service", backend_service) if header_action is not None: pulumi.set(__self__, "header_action", header_action) if weight is not None: pulumi.set(__self__, "weight", weight) @property @pulumi.getter(name="backendService") def backend_service(self) -> Optional[pulumi.Input[str]]: """ The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. """ return pulumi.get(self, "backend_service") @backend_service.setter def backend_service(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "backend_service", value) @property @pulumi.getter(name="headerAction") def header_action(self) -> Optional[pulumi.Input['HttpHeaderActionArgs']]: """ Specifies changes to request and response headers that need to take effect for the selected backendService. headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. """ return pulumi.get(self, "header_action") @header_action.setter def header_action(self, value: Optional[pulumi.Input['HttpHeaderActionArgs']]): pulumi.set(self, "header_action", value) @property @pulumi.getter def weight(self) -> Optional[pulumi.Input[int]]: """ Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000. """ return pulumi.get(self, "weight") @weight.setter def weight(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "weight", value)
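# ---------------------------------------------------------------------------
# Illustrative usage (editor's sketch, not part of the generated module).
# The snippet below shows how a few of the input types defined above are
# typically instantiated when declaring Compute resources. It assumes these
# Args classes are re-exported from pulumi_google_native.compute.alpha, as is
# usual for Pulumi SDKs; the project and backend-service URLs are placeholders.

import pulumi_google_native.compute.alpha as compute_alpha

# TCP health check on a fixed port; leaving request/response unset means a
# successful TCP handshake alone indicates health (see TCPHealthCheckArgs).
tcp_check = compute_alpha.TCPHealthCheckArgs(
    port=8080,
)

# Weighted traffic split: weight is relative to the sum of all weights in the
# enclosing route action (see WeightedBackendServiceArgs).
weighted_backend = compute_alpha.WeightedBackendServiceArgs(
    backend_service="projects/my-project/global/backendServices/my-backend",
    weight=500,
)

# URL-map test case asserting that a host/path pair maps to that backend
# service; the Host header matches the host field, as UrlMapTestArgs requires.
url_map_test = compute_alpha.UrlMapTestArgs(
    host="example.com",
    path="/api/",
    service="projects/my-project/global/backendServices/my-backend",
    headers=[compute_alpha.UrlMapTestHeaderArgs(name="Host", value="example.com")],
)
# ---------------------------------------------------------------------------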
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
A specification of the type and number of accelerator cards attached to the instance. :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance. :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. An access configuration attached to an instance's network interface. Only one access config per instance is supported. :param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. :param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range. :param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. :param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. :param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. :param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range. :param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. :param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated. :param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT. Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled). :param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false). 
:param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation. :param pulumi.Input[int] numa_node_count: The number of vNUMA nodes. :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. :param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width. An alias IP range attached to an instance's network interface. :param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). :param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. Properties of the SKU instances being reserved. Next ID: 9 :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count. :param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. :param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals. :param pulumi.Input[str] min_cpu_platform: Minimum cpu platform the reservation. This reservation type allows to pre allocate specific instance configuration. Next ID: 5 :param pulumi.Input[str] count: Specifies the number of resources that are allocated. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: The instance properties for the reservation. [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. 
Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInitializeParamsArchitecture'] architecture: The architecture of the attached disk. Valid values are arm64 or x86_64. :param pulumi.Input[str] description: An optional description. Provide this property when creating the disk. :param pulumi.Input[str] disk_name: Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. :param pulumi.Input[str] disk_type: Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. :param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. :param pulumi.Input[Sequence[pulumi.Input[str]]] license_codes: Integer license codes indicating which licenses are attached to this disk. :param pulumi.Input[Sequence[pulumi.Input[str]]] licenses: A list of publicly visible licenses. Reserved for Google's use. :param pulumi.Input[bool] multi_writer: Indicates whether or not the disk can be read/write attached to more than one instance. :param pulumi.Input['AttachedDiskInitializeParamsOnUpdateAction'] on_update_action: Specifies which action to take on instance update with this disk. Default is to use the existing disk. :param pulumi.Input[str] provisioned_iops: Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to. Only applicable for regional resources. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. 
For instance template, specify only the resource policy name. :param pulumi.Input[str] source_image: The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_image_encryption_key: The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. :param pulumi.Input[str] source_instant_snapshot: The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceInstantSnapshot initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. :param pulumi.Input[str] source_snapshot: The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. :param pulumi.Input['CustomerEncryptionKeyArgs'] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. An instance-attached disk resource. :param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[bool] boot: Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. :param pulumi.Input[str] device_name: Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. 
:param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key. If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. :param pulumi.Input[str] disk_size_gb: The size of the disk in GB. :param pulumi.Input[bool] force_attach: [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. :param pulumi.Input[Sequence[pulumi.Input['GuestOsFeatureArgs']]] guest_os_features: A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. :param pulumi.Input['AttachedDiskInitializeParamsArgs'] initialize_params: [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. :param pulumi.Input['AttachedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. :param pulumi.Input['AttachedDiskMode'] mode: The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. :param pulumi.Input['AttachedDiskSavedState'] saved_state: For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api. :param pulumi.Input[str] source: Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. :param pulumi.Input['AttachedDiskType'] type: Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT. 
Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging. :param pulumi.Input[Sequence[pulumi.Input['AuditLogConfigArgs']]] audit_log_configs: The configuration for logging of each type of permission. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] service: Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging. :param pulumi.Input[Sequence[pulumi.Input[str]]] exempted_members: Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. :param pulumi.Input[bool] ignore_child_exemptions: This is deprecated and has no effect. Do not use. :param pulumi.Input['AuditLogConfigLogType'] log_type: The log type that this config enables. This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsPermissionType'] permission_type: This is deprecated and has no effect. Do not use. CPU utilization policy. :param pulumi.Input['AutoscalingPolicyCpuUtilizationPredictiveMethod'] predictive_method: Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. :param pulumi.Input[float] utilization_target: The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. 
If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization. Custom utilization metric policy. :param pulumi.Input[str] filter: A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. :param pulumi.Input[str] metric: The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. :param pulumi.Input[float] single_instance_assignment: If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. The metric itself does not change value due to group resizing. A good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead. :param pulumi.Input[float] utilization_target: The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances. :param pulumi.Input['AutoscalingPolicyCustomMetricUtilizationUtilizationTargetType'] utilization_target_type: Defines how target utilization value is expressed for a Stackdriver Monitoring metric. 
Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. Configuration parameters of autoscaling based on load balancing. :param pulumi.Input[float] utilization_target: Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8. Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. :param pulumi.Input['FixedOrPercentArgs'] max_scaled_down_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window the autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step. :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. Configuration that allows for slower scale in so that even if Autoscaler recommends an abrupt scale in of a MIG, it will be throttled as specified by the parameters below. :param pulumi.Input['FixedOrPercentArgs'] max_scaled_in_replicas: Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window the autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step. :param pulumi.Input[int] time_window_sec: How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above. Cloud Autoscaler policy. :param pulumi.Input[int] cool_down_period_sec: The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time the autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. :param pulumi.Input['AutoscalingPolicyCpuUtilizationArgs'] cpu_utilization: Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. :param pulumi.Input[Sequence[pulumi.Input['AutoscalingPolicyCustomMetricUtilizationArgs']]] custom_metric_utilizations: Configuration parameters of autoscaling based on a custom metric. :param pulumi.Input['AutoscalingPolicyLoadBalancingUtilizationArgs'] load_balancing_utilization: Configuration parameters of autoscaling based on load balancer. :param pulumi.Input[int] max_num_replicas: The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than the minimum number of replicas. :param pulumi.Input[int] min_num_replicas: The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, the autoscaler chooses a default value depending on the maximum number of instances allowed. :param pulumi.Input['AutoscalingPolicyMode'] mode: Defines the operating mode for this policy.
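As an illustration of the autoscaling parameters above, the following is a minimal Python sketch. It assumes the args classes named in these docstrings are importable from the google-native compute module (the exact path, e.g. pulumi_google_native.compute.beta, depends on the provider version), and the replica bounds and target are illustrative values only.

import pulumi_google_native.compute.beta as compute  # import path assumed

# Scale on average CPU: hold utilization near 60% with 1-10 replicas.
# The Autoscaler resource that would consume this policy is not shown.
autoscaling_policy = compute.AutoscalingPolicyArgs(
    cool_down_period_sec=60,  # let new instances finish initializing
    min_num_replicas=1,
    max_num_replicas=10,
    cpu_utilization=compute.AutoscalingPolicyCpuUtilizationArgs(
        utilization_target=0.6,  # float in (0, 1]; 0.6 matches the documented default
    ),
)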
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] scaling_schedules: Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting. :param pulumi.Input[str] header_name: The header field name to match on when bypassing the cache. Values are case-insensitive. Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. Specify CDN TTLs for response error codes. :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. Message containing Cloud CDN configuration for a backend bucket. :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['BackendBucketCdnPolicyCacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendBucketCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive.
For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. :param pulumi.Input[Sequence[pulumi.Input['BackendBucketCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. :param pulumi.Input[bool] request_coalescing: If true, Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. :param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.
This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. Bypass the cache when the specified request headers are present, e.g. Pragma or Authorization headers. Values are case insensitive. The presence of such a header overrides the cache_mode setting. :param pulumi.Input[str] header_name: The header field name to match on when bypassing the cache. Values are case-insensitive. Specify CDN TTLs for response error codes. :param pulumi.Input[int] code: The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. :param pulumi.Input[int] ttl: The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. Message containing Cloud CDN configuration for a backend service. :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyBypassCacheOnRequestHeaderArgs']]] bypass_cache_on_request_headers: Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. :param pulumi.Input['CacheKeyPolicyArgs'] cache_key_policy: The CacheKeyPolicy for this CdnPolicy. :param pulumi.Input['BackendServiceCdnPolicyCacheMode'] cache_mode: Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. :param pulumi.Input[int] client_ttl: Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client.
With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). :param pulumi.Input[int] default_ttl: Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[int] max_ttl: Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. :param pulumi.Input[bool] negative_caching: Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. :param pulumi.Input[Sequence[pulumi.Input['BackendServiceCdnPolicyNegativeCachingPolicyArgs']]] negative_caching_policy: Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. :param pulumi.Input[bool] request_coalescing: If true, Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.
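To make the cache-mode and TTL interaction above concrete, here is a hedged sketch of a backend-service CDN policy. The top-level BackendServiceCdnPolicyArgs name and the import path are assumptions inferred from the nested class names documented here; the TTL values are illustrative.

import pulumi_google_native.compute.beta as compute  # import path assumed

# Cache static content for an hour by default, and cache 404s for two
# minutes so repeated misses do not hammer the origin.
cdn_policy = compute.BackendServiceCdnPolicyArgs(  # class name assumed
    cache_mode=compute.BackendServiceCdnPolicyCacheMode.CACHE_ALL_STATIC,
    default_ttl=3600,  # used when the origin sets no valid TTL
    max_ttl=86400,     # caps origin-supplied max-age / s-maxage
    client_ttl=3600,   # clamps the max-age forwarded to browsers
    negative_caching=True,
    negative_caching_policy=[
        compute.BackendServiceCdnPolicyNegativeCachingPolicyArgs(code=404, ttl=120),
    ],
)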
:param pulumi.Input[int] serve_while_stale: Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. :param pulumi.Input[str] signed_url_cache_max_age_sec: Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. Connection Tracking configuration for this BackendService. :param pulumi.Input['BackendServiceConnectionTrackingPolicyConnectionPersistenceOnUnhealthyBackends'] connection_persistence_on_unhealthy_backends: Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. Using this mode to override the default is generally not recommended. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). :param pulumi.Input[bool] enable_strong_affinity: Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. :param pulumi.Input[int] idle_timeout_sec: Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer, the default is 60 seconds. This option is not available publicly. :param pulumi.Input['BackendServiceConnectionTrackingPolicyTrackingMode'] tracking_mode: Specifies the key used for connection tracking. There are two options: - PER_CONNECTION: This is the default mode.
The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol. - PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode). For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes). :param pulumi.Input[bool] disable_connection_drain_on_failover: This can be set to true only if the protocol is TCP. The default is false. :param pulumi.Input[bool] drop_traffic_if_unhealthy: If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy. If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. :param pulumi.Input[float] failover_ratio: The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). :param pulumi.Input[str] application_name: Application name to be used in the OAuth consent screen. :param pulumi.Input[str] client_name: Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. :param pulumi.Input[str] developer_email_address: Developer's information to be used in the OAuth consent screen. Identity-Aware Proxy. :param pulumi.Input[bool] enabled: Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. :param pulumi.Input[str] oauth2_client_id: OAuth2 client ID to use for the authentication flow. :param pulumi.Input['BackendServiceIAPOAuth2ClientInfoArgs'] oauth2_client_info: [Input Only] OAuth client info required to generate the client ID to be used for IAP.
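A minimal sketch of the IAP settings just described, assuming a BackendServiceIAPArgs class exists alongside the BackendServiceIAPOAuth2ClientInfoArgs named above; the class name, import path, and config keys are placeholders.

import pulumi
import pulumi_google_native.compute.beta as compute  # import path assumed

cfg = pulumi.Config()
iap = compute.BackendServiceIAPArgs(  # class name assumed
    enabled=True,  # enabling requires both OAuth2 fields below
    oauth2_client_id=cfg.require("iapClientId"),
    # Stored as a secret; the API later returns only its SHA-256 hash.
    oauth2_client_secret=cfg.require_secret("iapClientSecret"),
)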
:param pulumi.Input[str] oauth2_client_secret: OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field. @InputOnly The available logging options for the load balancer traffic served by this backend service. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for the load balancer traffic served by this backend service. :param pulumi.Input[float] sample_rate: This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. Message containing information of one individual backend. :param pulumi.Input['BackendBalancingMode'] balancing_mode: Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. :param pulumi.Input[float] capacity_scaler: A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[bool] failover: This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. :param pulumi.Input[str] group: The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. :param pulumi.Input[int] max_connections: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_connections_per_endpoint: Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_connections_per_instance: Defines a target maximum number of simultaneous connections. 
For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. :param pulumi.Input[int] max_rate: Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_endpoint: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_rate_per_instance: Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. :param pulumi.Input[float] max_utilization: Optional parameter to define a target capacity for the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. Associates `members`, or principals, with a `role`. :param pulumi.Input[str] binding_id: This is deprecated and has no effect. Do not use. :param pulumi.Input['ExprArgs'] condition: The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). :param pulumi.Input[Sequence[pulumi.Input[str]]] members: Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`.
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. :param pulumi.Input[str] role: Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Message containing what to include in the cache key for a request for Cloud CDN. :param pulumi.Input[bool] include_host: If true, requests to different hosts will be cached separately. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_http_headers: Allows HTTP request headers (by name) to be used in the cache key. :param pulumi.Input[Sequence[pulumi.Input[str]]] include_named_cookies: Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. :param pulumi.Input[bool] include_protocol: If true, http and https requests will be cached separately. :param pulumi.Input[bool] include_query_string: If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_blacklist: Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. :param pulumi.Input[Sequence[pulumi.Input[str]]] query_string_whitelist: Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. [Deprecated] gRPC call credentials to access the SDS server. gRPC call credentials to access the SDS server. :param pulumi.Input['CallCredentialsCallCredentialType'] call_credential_type: The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. :param pulumi.Input['MetadataCredentialsFromPluginArgs'] from_plugin: Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. [Deprecated] gRPC channel credentials to access the SDS server. gRPC channel credentials to access the SDS server. :param pulumi.Input['TlsCertificatePathsArgs'] certificates: The TLS certificate paths used to access the SDS server. Valid if channelCredentialType is CERTIFICATES. :param pulumi.Input['ChannelCredentialsChannelCredentialType'] channel_credential_type: The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. Settings controlling the volume of requests, connections and retries to this backend service. :param pulumi.Input['DurationArgs'] connect_timeout: The timeout for new network connections to hosts. :param pulumi.Input[int] max_connections: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.
:param pulumi.Input[int] max_pending_requests: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_requests: The maximum number of parallel requests that are allowed to the backend service. If not specified, there is no limit. :param pulumi.Input[int] max_requests_per_connection: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] max_retries: Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionIam'] iam: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionOp'] op: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] svc: This is deprecated and has no effect. Do not use. :param pulumi.Input['ConditionSys'] sys: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: This is deprecated and has no effect. Do not use. A set of Confidential Instance options. :param pulumi.Input[bool] enable_confidential_compute: Defines whether the instance should have confidential compute enabled. Message containing connection draining configuration. :param pulumi.Input[int] draining_timeout_sec: Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, see Enabling connection draining. The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash. :param pulumi.Input[str] name: Name of the cookie. :param pulumi.Input[str] path: Path to set for the cookie. :param pulumi.Input['DurationArgs'] ttl: Lifetime of the cookie. This message defines settings for a consistent hash style load balancer. :param pulumi.Input['ConsistentHashLoadBalancerSettingsHttpCookieArgs'] http_cookie: Hash is based on HTTP Cookie. This field describes an HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[str] http_header_name: The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. :param pulumi.Input[str] minimum_ring_size: The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. :param pulumi.Input[bool] allow_credentials: In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. Default is false.
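The cookie-based consistent hashing documented above can be assembled from these args classes. A hedged sketch; the outer settings class name and import path are assumptions, and note that DurationArgs.seconds and minimum_ring_size are strings per the parameter types documented here.

import pulumi_google_native.compute.beta as compute  # import path assumed

# Pin each client to a backend via a generated cookie valid for one hour.
hash_settings = compute.ConsistentHashLoadBalancerSettingsArgs(  # name assumed
    http_cookie=compute.ConsistentHashLoadBalancerSettingsHttpCookieArgs(
        name="session-affinity",
        path="/",
        ttl=compute.DurationArgs(seconds="3600", nanos=0),
    ),
    minimum_ring_size="1024",  # larger rings give finer load distribution
)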
:param pulumi.Input[Sequence[pulumi.Input[str]]] allow_headers: Specifies the content for the Access-Control-Allow-Headers header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_methods: Specifies the content for the Access-Control-Allow-Methods header. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origin_regexes: Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[Sequence[pulumi.Input[str]]] allow_origins: Specifies the list of origins that are allowed to make CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. :param pulumi.Input[bool] disabled: If true, specifies that the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. :param pulumi.Input[Sequence[pulumi.Input[str]]] expose_headers: Specifies the content for the Access-Control-Expose-Headers header. :param pulumi.Input[int] max_age: Specifies how long results of a preflight request can be cached in seconds. This field translates to the Access-Control-Max-Age header. :param pulumi.Input[str] kms_key_name: The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/key_region/cryptoKeys/key" :param pulumi.Input[str] kms_key_service_account: The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com" :param pulumi.Input[str] raw_key: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" :param pulumi.Input[str] rsa_encrypted_key: Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFHz0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoDD6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using an RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Get the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem Deprecation status for a public resource. :param pulumi.Input[str] deleted: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] deprecated: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it.
:param pulumi.Input[str] obsolete: An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. :param pulumi.Input[str] replacement: The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. :param pulumi.Input['DeprecationStatusState'] state: The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end-of-life date for an image can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. :param pulumi.Input['RolloutPolicyArgs'] state_override: The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user-specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. A specification of the desired way to instantiate a disk in the instance template when it is created from a source instance. :param pulumi.Input[bool] auto_delete: Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). :param pulumi.Input[str] custom_image: The custom source image to be used to restore this disk when instantiating this instance template. :param pulumi.Input[str] device_name: Specifies the device name of the disk to which the configurations apply. :param pulumi.Input['DiskInstantiationConfigInstantiateFrom'] instantiate_from: Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image URL for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. A set of Display Device options. :param pulumi.Input[bool] enable_display: Defines whether the instance has Display enabled. :param pulumi.Input[str] zone: The URL of the zone. The zone must exist in the region where the managed instance group is located. :param pulumi.Input['DistributionPolicyTargetShape'] target_shape: The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).
:param pulumi.Input[Sequence[pulumi.Input['DistributionPolicyZoneConfigurationArgs']]] zones: Zones where the regional managed instance group will create and manage its instances. A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". Range is approximately 10,000 years. :param pulumi.Input[int] nanos: Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. :param pulumi.Input[str] seconds: Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. :param pulumi.Input[str] description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. :param pulumi.Input[str] expression: Textual representation of an expression in Common Expression Language syntax. :param pulumi.Input[str] location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. :param pulumi.Input[str] title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow the expression to be entered. The interface for the external VPN gateway. :param pulumi.Input[int] id: The numeric ID of this interface. The allowed input values for this id for the different redundancy types of the external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 :param pulumi.Input[str] ip_address: IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premises gateway or another Cloud provider's VPN gateway; it cannot be an IP address from Google Compute Engine. :param pulumi.Input[str] content: The raw content in the secure keys file. :param pulumi.Input['FileContentBufferFileType'] file_type: The file type of the source file. :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule.
This value can either be one of the following well-known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well-known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. The available logging options for a firewall rule. :param pulumi.Input[bool] enable: This field denotes whether to enable logging for a particular firewall rule. :param pulumi.Input['FirewallLogConfigMetadata'] metadata: This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. :param pulumi.Input[str] attachment_target: The target that the firewall policy is attached to. :param pulumi.Input[str] name: The name for an association. :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well-known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_address_groups: Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination FQDNs allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_region_codes: Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as a 2-letter country code as defined in ISO 3166 alpha-2, e.g. "US". Maximum number of dest region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleMatcherLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match.
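As a concrete instance of the layer-4 pairs just described, the sketch below fills in a matcher using the FirewallPolicyRuleMatcherArgs and FirewallPolicyRuleMatcherLayer4ConfigArgs classes these docstrings reference; the import path is an assumption, and the CIDR and ports are placeholders.

import pulumi_google_native.compute.beta as compute  # import path assumed

# Match inbound TCP traffic on ports 22, 80, and 443 from one CIDR block;
# pass this as the `match` of a firewall policy rule.
matcher = compute.FirewallPolicyRuleMatcherArgs(
    src_ip_ranges=["10.0.0.0/8"],
    layer4_configs=[
        compute.FirewallPolicyRuleMatcherLayer4ConfigArgs(
            ip_protocol="tcp",
            ports=["22", "80", "443"],  # integers or ranges such as "12345-12349"
        ),
    ],
)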
:param pulumi.Input[Sequence[pulumi.Input[str]]] src_address_groups: Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_fqdns: Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source FQDNs allowed is 1000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_region_codes: Region codes whose IP addresses will be used to match for source of traffic. Should be specified as a 2-letter country code as defined in ISO 3166 alpha-2, e.g. "US". Maximum number of source region codes allowed is 5000. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] src_secure_tags: List of secure tag values, which should be matched at the source of the traffic. For an INGRESS rule, if all the srcSecureTags are INEFFECTIVE and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. :param pulumi.Input[str] name: Name of the secure tag, created with TagManager's TagValue API. Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny(status)", where valid values for status are 403, 404, and 502. :param pulumi.Input[str] description: An optional description for this resource. :param pulumi.Input['FirewallPolicyRuleDirection'] direction: The direction in which this rule applies. :param pulumi.Input[bool] disabled: Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. :param pulumi.Input['FirewallPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority, where 0 is the highest priority and 2147483647 is the lowest priority. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. :param pulumi.Input[Sequence[pulumi.Input['FirewallPolicyRuleSecureTagArgs']]] target_secure_tags: A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags. If all the target_secure_tags are in an INEFFECTIVE state, then this rule will be ignored.
targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag are specified, the firewall rule applies to all instances on the specified network. Maximum number of target label tags allowed is 256. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances to which this rule applies. Encapsulates a numeric value that can be either absolute or relative. :param pulumi.Input[int] fixed: Specifies a fixed number of VM instances. This must be a positive integer. :param pulumi.Input[int] percent: Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%. Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule. :param pulumi.Input[str] namespace: Service Directory namespace to register the forwarding rule under. :param pulumi.Input[str] service: Service Directory service to register the forwarding rule under. :param pulumi.Input[str] service_directory_region: [Optional] Service Directory region to register this global forwarding rule under. Defaults to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesArgs'] instance_properties: Properties of the SKU instances being reserved. :param pulumi.Input[str] total_count: Total number of instances for which capacity assurance is requested at a future time period. :param pulumi.Input[str] start_time: Start time of the Future Reservation. The start_time is an RFC3339 string. :param pulumi.Input[str] grpc_service_name: The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. :param pulumi.Input[int] port: The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. :param pulumi.Input['GRPCHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields. [Deprecated] gRPC config to access the SDS server. gRPC config to access the SDS server. :param pulumi.Input['CallCredentialsArgs'] call_credentials: The call credentials to access the SDS server.
:param pulumi.Input['ChannelCredentialsArgs'] channel_credentials: The channel credentials to access the SDS server. :param pulumi.Input[str] target_uri: The target URI of the SDS server. Guest OS features. :param pulumi.Input['GuestOsFeatureType'] type: The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - SECURE_BOOT - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE For more information, see Enabling guest operating system features. :param pulumi.Input[str] host: The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTP2HealthCheckPortSpecification'] port_specification: Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, the HTTP/2 health check follows the behavior specified in the port and portName fields. :param pulumi.Input['HTTP2HealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTP/2 health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTP2HealthCheckWeightReportMode'] weight_report_mode: Weight report mode, used for weighted load balancing. :param pulumi.Input[str] host: The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTPHealthCheckPortSpecification'] port_specification: Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, the HTTP health check follows the behavior specified in the port and portName fields.
:param pulumi.Input['HTTPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTP health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTPHealthCheckWeightReportMode'] weight_report_mode: Weight report mode, used for weighted load balancing. :param pulumi.Input[str] host: The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. :param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['HTTPSHealthCheckPortSpecification'] port_specification: Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, the HTTPS health check follows the behavior specified in the port and portName fields. :param pulumi.Input['HTTPSHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request_path: The request path of the HTTPS health check request. The default value is /. :param pulumi.Input[str] response: The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. :param pulumi.Input['HTTPSHealthCheckWeightReportMode'] weight_report_mode: Weight report mode, used for weighted load balancing. Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[str] path_matcher: The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion.
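Taken together, the hosts and path_matcher fields above express host-based routing inside a UrlMap. The following is a minimal sketch, not an official example: it assumes the pulumi_google_native provider and that its compute module (compute.alpha here; the exact module path varies by SDK version) exports a HostRuleArgs class matching these fields.

import pulumi_google_native.compute.alpha as compute

# Route the bare and wildcard API hostnames to the "api-paths" path matcher.
# '*' may only be the first character and must be followed by '-' or '.'.
api_host_rule = compute.HostRuleArgs(
    description="Send API traffic to the api-paths matcher",
    hosts=["api.example.com", "*.api.example.com"],
    path_matcher="api-paths",  # must name a pathMatcher defined in the same UrlMap
)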
Specification for how requests are aborted as part of fault injection. :param pulumi.Input[int] http_status: The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. Specifies the delay introduced by the load balancer before forwarding the request to the backend service as part of fault injection. :param pulumi.Input['DurationArgs'] fixed_delay: Specifies the value of the fixed delay interval. :param pulumi.Input[float] percentage: The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by the load balancer on a percentage of requests before sending those requests to the backend service. Similarly, requests from clients can be aborted by the load balancer for a percentage of requests. :param pulumi.Input['HttpFaultAbortArgs'] abort: The specification for how client requests are aborted as part of fault injection. :param pulumi.Input['HttpFaultDelayArgs'] delay: The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. HttpFilterConfiguration supplies additional contextual settings for networkservices.HttpFilter resources enabled by Traffic Director. :param pulumi.Input[str] config: The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl. :param pulumi.Input[str] config_type_url: The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct. :param pulumi.Input[str] filter_name: Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm. The request and response header transformations that take effect before the request is passed along to the selected backendService. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] request_headers_to_add: Headers to add to a matching request before forwarding the request to the backendService. :param pulumi.Input[Sequence[pulumi.Input[str]]] request_headers_to_remove: A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderOptionArgs']]] response_headers_to_add: Headers to add to the response before sending the response back to the client. :param pulumi.Input[Sequence[pulumi.Input[str]]] response_headers_to_remove: A list of header names for headers that need to be removed from the response before sending the response back to the client. matchRule criteria for request header matches.
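As a concrete reading of the fault-injection fields documented above, the sketch below aborts a small fraction of requests and delays another fraction. It uses the HttpFaultInjectionArgs, HttpFaultAbortArgs, HttpFaultDelayArgs, and DurationArgs classes referenced in these docstrings; the module path and the int64-as-string seconds field are assumptions to verify against your SDK version.

import pulumi_google_native.compute.alpha as compute

fault_injection = compute.HttpFaultInjectionArgs(
    # Abort 0.5% of requests with HTTP 503 (http_status must be 200-599).
    abort=compute.HttpFaultAbortArgs(http_status=503, percentage=0.5),
    # Delay 5% of requests by 2 seconds before forwarding to the backend.
    delay=compute.HttpFaultDelayArgs(
        fixed_delay=compute.DurationArgs(seconds="2", nanos=0),
        percentage=5.0,
    ),
)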
:param pulumi.Input[str] exact_match: The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[str] header_name: The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. :param pulumi.Input[bool] invert_match: If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. :param pulumi.Input[str] prefix_match: The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input[bool] present_match: A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. :param pulumi.Input['Int64RangeMatchArgs'] range_match: The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer or is empty, the match fails. For example, for a range [-5, 0]: -3 will match; 0 will not match; 0.25 will not match; -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. :param pulumi.Input[str] regex_match: The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input[str] suffix_match: The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. Specification determining how headers are added to requests or responses. :param pulumi.Input[str] header_name: The name of the header. :param pulumi.Input[str] header_value: The value of the header to add. :param pulumi.Input[bool] replace: If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. HttpRouteRuleMatch criteria for a request's query parameter.
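A minimal sketch of the header-match and header-option fields documented above, under the same assumed module path; note that exactly one of the *Match fields may be set per header match, and the header name "x-env" is a hypothetical example.

import pulumi_google_native.compute.alpha as compute

# Match requests whose method is GET (the ":method" pseudo-header).
method_is_get = compute.HttpHeaderMatchArgs(
    header_name=":method",
    exact_match="GET",
)

# Stamp a header on matching requests, discarding any pre-existing values.
stamp_env_header = compute.HttpHeaderOptionArgs(
    header_name="x-env",   # hypothetical header name
    header_value="prod",
    replace=True,
)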
:param pulumi.Input[str] exact_match: The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] name: The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. :param pulumi.Input[bool] present_match: Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. :param pulumi.Input[str] regex_match: The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. Specifies settings for an HTTP redirect. :param pulumi.Input[str] host_redirect: The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. :param pulumi.Input[bool] https_redirect: If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. :param pulumi.Input[str] path_redirect: The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input[str] prefix_redirect: The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. :param pulumi.Input['HttpRedirectActionRedirectResponseCode'] redirect_response_code: The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER, which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. :param pulumi.Input[bool] strip_query: If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. The retry policy associated with HttpRouteRule. :param pulumi.Input[int] num_retries: Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. :param pulumi.Input['DurationArgs'] per_try_timeout: Specifies a non-zero timeout per retry attempt. If not specified, will use the timeout set in the HttpRouteAction field.
If timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input[str]]] retry_conditions: Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true: - cancelled - deadline-exceeded - internal - resource-exhausted - unavailable :param pulumi.Input['CorsPolicyArgs'] cors_policy: The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input['HttpFaultInjectionArgs'] fault_injection_policy: The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly, requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy are ignored by clients that are configured with a fault_injection_policy. :param pulumi.Input['DurationArgs'] max_stream_duration: Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route.
This field is only allowed if the Url map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED. :param pulumi.Input['RequestMirrorPolicyArgs'] request_mirror_policy: Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input['HttpRetryPolicyArgs'] retry_policy: Specifies the retry policy associated with this route. :param pulumi.Input['DurationArgs'] timeout: Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input['UrlRewriteArgs'] url_rewrite: The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['WeightedBackendServiceArgs']]] weighted_backend_services: A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur. :param pulumi.Input[str] full_path_match: For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpHeaderMatchArgs']]] header_matches: Specifies a list of header match criteria, all of which must match corresponding headers in the request. :param pulumi.Input[bool] ignore_case: Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterArgs']]] metadata_filters: Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to the load balancer, xDS clients present node metadata. 
When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match the corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here are applied after those specified in the ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[str] prefix_match: For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. :param pulumi.Input[Sequence[pulumi.Input['HttpQueryParameterMatchArgs']]] query_parameter_matches: Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[str] regex_match: For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. The HttpRouteRule setting specifies how to match an HTTP request and the corresponding routing action that load balancing proxies perform. :param pulumi.Input[str] description: The short description conveying the intent of this routeRule. The description can have a maximum length of 1024 characters. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction. HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_configs: Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpFilterConfigArgs']]] http_filter_metadata: Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director.
httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleMatchArgs']]] match_rules: The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However, predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. :param pulumi.Input[int] priority: For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. The parameters of the raw disk image. :param pulumi.Input['ImageRawDiskContainerType'] container_type: The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. :param pulumi.Input[str] source: The full Google Cloud Storage URL where the raw disk image archive is stored.
The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL Initial state for a shielded instance; these are public keys which are safe to store in public. :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbs: The Key Database (db). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] dbxs: The forbidden key database (dbx). :param pulumi.Input[Sequence[pulumi.Input['FileContentBufferArgs']]] keks: The Key Exchange Key (KEK). :param pulumi.Input['FileContentBufferArgs'] pk: The Platform Key (PK). :param pulumi.Input['InstancePropertiesPatchArgs'] properties: Properties for instances that are created using this instance config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersOnHealthCheck'] on_health_check: If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. This is useful for troubleshooting and setting up your health check configuration. :param pulumi.Input['InstanceGroupManagerAutoHealingPolicyAutoHealingTriggersArgs'] auto_healing_triggers: Restricts what triggers autohealing. :param pulumi.Input[str] health_check: The URL for the health check that signals autohealing. :param pulumi.Input[int] initial_delay_sec: The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, the number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. :param pulumi.Input[int] timeout_sec: The number of seconds to wait for a readiness signal during initialization before timing out.
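Here is a minimal sketch of the autohealing policy fields described above; the module path, project name, and health check URL are placeholders or assumptions, not values from this document.

import pulumi_google_native.compute.alpha as compute

auto_healing = compute.InstanceGroupManagerAutoHealingPolicyArgs(
    # Placeholder health check URL; autohealing recreates VMs that fail it.
    health_check="projects/my-project/global/healthChecks/web-hc",
    initial_delay_sec=300,  # let startup scripts finish; must be in [0, 3600]
    # Never autoheal more than 10% of the group at once (rounded if necessary).
    max_unavailable=compute.FixedOrPercentArgs(percent=10),
)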
:param pulumi.Input['InstanceGroupManagerInstanceLifecyclePolicyMetadataBasedReadinessSignalArgs'] metadata_based_readiness_signal: The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. :param pulumi.Input['InstanceGroupManagerUpdatePolicyInstanceRedistributionType'] instance_redistribution_type: The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. :param pulumi.Input['FixedOrPercentArgs'] max_surge: The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. :param pulumi.Input['FixedOrPercentArgs'] max_unavailable: The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. :param pulumi.Input[int] min_ready_sec: Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600]. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMinimalAction'] minimal_action: Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action. :param pulumi.Input['InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedAction'] most_disruptive_allowed_action: Most disruptive action that is allowed to be taken on an instance. 
You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without replacing the instance, or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than the most disruptive allowed action you specify, it will not perform the update at all. :param pulumi.Input['InstanceGroupManagerUpdatePolicyReplacementMethod'] replacement_method: What action should be used to replace instances. See minimal_action.REPLACE :param pulumi.Input['InstanceGroupManagerUpdatePolicyType'] type: The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls). :param pulumi.Input[str] instance_template: The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. :param pulumi.Input[str] name: Name of the version. Unique among all versions in the scope of this managed instance group. :param pulumi.Input['FixedOrPercentArgs'] target_size: Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: - If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. - If expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize). If there is a remainder, the number is rounded. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information. Additional instance params. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. Represents the change that you want to make to the instance properties. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The label key-value pairs that you want to patch onto the instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. :param pulumi.Input['AdvancedMachineFeaturesArgs'] advanced_machine_features: Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. :param pulumi.Input[bool] can_ip_forward: Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own.
If these instances will be used as an IP gateway or will be set as the next hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. :param pulumi.Input['ConfidentialInstanceConfigArgs'] confidential_instance_config: Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. :param pulumi.Input[str] description: An optional text description for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input['AttachedDiskArgs']]] disks: An array of disks that are associated with the instances that are created from these properties. :param pulumi.Input['DisplayDeviceArgs'] display_device: Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: A list of guest accelerator cards' type and count to use for instances created from these properties. :param pulumi.Input['InstancePropertiesKeyRevocationActionType'] key_revocation_action_type: KeyRevocationActionType of the instance. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to instances that are created from these properties. :param pulumi.Input[str] machine_type: The machine type to use for instances that are created from these properties. :param pulumi.Input['MetadataArgs'] metadata: The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. :param pulumi.Input[str] min_cpu_platform: Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceArgs']]] network_interfaces: An array of network access configurations for this interface. :param pulumi.Input['NetworkPerformanceConfigArgs'] network_performance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['InstancePropertiesPostKeyRevocationActionType'] post_key_revocation_action_type: PostKeyRevocationActionType of the instance. :param pulumi.Input['InstancePropertiesPrivateIpv6GoogleAccess'] private_ipv6_google_access: The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. :param pulumi.Input['ReservationAffinityArgs'] reservation_affinity: Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] resource_policies: Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet.
:param pulumi.Input['SchedulingArgs'] scheduling: Specifies the scheduling options for the instances that are created from these properties. :param pulumi.Input[Sequence[pulumi.Input[str]]] secure_tags: [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. Note that for MachineImage, this is not supported yet. :param pulumi.Input[Sequence[pulumi.Input['ServiceAccountArgs']]] service_accounts: A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances. :param pulumi.Input['ShieldedInstanceConfigArgs'] shielded_instance_config: Note that for MachineImage, this is not supported yet. :param pulumi.Input['ShieldedVmConfigArgs'] shielded_vm_config: Specifies the Shielded VM options for the instances that are created from these properties. :param pulumi.Input['TagsArgs'] tags: A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035. HttpRouteRuleMatch criteria for field values that must stay within the specified integer range. :param pulumi.Input[str] range_end: The end of the range (exclusive) in signed long integer format. :param pulumi.Input[str] range_start: The start of the range (inclusive) in signed long integer format. Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments. :param pulumi.Input[str] interconnect_name: Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. :param pulumi.Input[str] partner_name: Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. :param pulumi.Input[str] portal_url: URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. Describes a pre-shared key used to set up MACsec in static connectivity association key (CAK) mode. :param pulumi.Input[str] name: A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] start_time: An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart. Configuration information for enabling Media Access Control security (MACsec) on this Interconnect between Google and your on-premises router.
:param pulumi.Input[Sequence[pulumi.Input['InterconnectMacsecPreSharedKeyArgs']]] pre_shared_keys: A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link. :param pulumi.Input[bool] fail_open: If set to true, the Interconnect will be configured with a should-secure MACsec security policy that allows the Google router to fall back to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. Commitment for a particular license resource. :param pulumi.Input[str] amount: The number of licenses purchased. :param pulumi.Input[str] cores_per_license: Specifies the core range of the instance for which this license applies. :param pulumi.Input[str] license: Any applicable license URI. :param pulumi.Input[int] min_guest_cpu_count: Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start. :param pulumi.Input[int] min_memory_mb: Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. :param pulumi.Input[int] disk_count: Specifies the number of such disks. :param pulumi.Input[int] disk_size_gb: Specifies the size of the disk in base-2 GB. :param pulumi.Input[str] disk_type: Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. This is deprecated and has no effect. Do not use. :param pulumi.Input['AuthorizationLoggingOptionsArgs'] authorization_logging_options: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsLogName'] log_name: This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. :param pulumi.Input[str] name: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] value: This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigCounterOptionsCustomFieldArgs']]] custom_fields: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] field: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] metric: This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsLogMode'] log_mode: This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCloudAuditOptionsArgs'] cloud_audit: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigCounterOptionsArgs'] counter: This is deprecated and has no effect. Do not use. :param pulumi.Input['LogConfigDataAccessOptionsArgs'] data_access: This is deprecated and has no effect. Do not use. [Deprecated] Custom authenticator credentials. :param pulumi.Input[str] name: Plugin name. :param pulumi.Input[str] struct_config: A text proto that conforms to a Struct type definition interpreted by the plugin.
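A minimal sketch of the MACsec configuration described above, with two keys whose start times satisfy the increasing-order and six-hour-spacing rules. The InterconnectMacsecArgs class name and module path are assumptions; InterconnectMacsecPreSharedKeyArgs is the type referenced in these docstrings.

import pulumi_google_native.compute.alpha as compute

macsec = compute.InterconnectMacsecArgs(
    fail_open=False,  # must-secure: drop traffic if the MKA session fails
    pre_shared_keys=[
        compute.InterconnectMacsecPreSharedKeyArgs(
            name="key-a", start_time="2024-01-01T00:00:00Z"),
        # At least 6 hours after the previous key's start time.
        compute.InterconnectMacsecPreSharedKeyArgs(
            name="key-b", start_time="2024-01-01T06:00:00Z"),
    ],
)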
MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer. :param pulumi.Input[str] name: Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. :param pulumi.Input[str] value: The value of the label must match the specified value. value can have a maximum length of 1024 characters. Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match the corresponding labels provided in the metadata. An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in their xDS requests to load balancers. :param pulumi.Input[Sequence[pulumi.Input['MetadataFilterLabelMatchArgs']]] filter_labels: The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria. This list must not be empty and can have at most 64 entries. :param pulumi.Input['MetadataFilterFilterMatchCriteria'] filter_match_criteria: Specifies how individual filter label matches within the list of filterLabels contribute toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. Metadata :param pulumi.Input[str] key: Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. :param pulumi.Input[str] value: Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB). A metadata key/value entry. :param pulumi.Input[Sequence[pulumi.Input['MetadataItemsItemArgs']]] items: Array of key/value pairs. The total size of all keys and values must be less than 512 KB. The named port. For example: <"http", 80>. :param pulumi.Input[str] name: The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. :param pulumi.Input[int] port: The port number, which can be a value between 1 and 65535. Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Optional serving service. The service name is case-sensitive and must be 1-63 characters long.
Example value: "default", "my-service". :param pulumi.Input[str] url_mask: A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "-dot-appname.appspot.com/". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. :param pulumi.Input[str] version: Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: "v1", "v2". Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] function: A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". :param pulumi.Input[str] url_mask: A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs " mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively. Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] service: Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". :param pulumi.Input[str] tag: Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010". :param pulumi.Input[str] url_mask: A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } and { service="bar2", tag="foo2" } respectively. Configuration for a serverless network endpoint group (NEG). The platform must be provided. Note: The target backend service must be in the same project and located in the same region as the Serverless NEG. :param pulumi.Input[str] platform: The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com :param pulumi.Input[str] resource: The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. 
The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name :param pulumi.Input[str] url_mask: A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag :param pulumi.Input[str] version: The optional resource version. The version identified by this value is platform-specific and is as follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag :param pulumi.Input[str] ip_address: An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP. :param pulumi.Input[str] subnetwork: If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - regions/region/subnetworks/subnetwork :param pulumi.Input[int] vlan: VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting. A network interface resource attached to an instance. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] access_configs: An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. :param pulumi.Input[Sequence[pulumi.Input['AliasIpRangeArgs']]] alias_ip_ranges: An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. :param pulumi.Input[int] internal_ipv6_prefix_length: The prefix length of the primary internal IPv6 range. :param pulumi.Input[Sequence[pulumi.Input['AccessConfigArgs']]] ipv6_access_configs: An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. :param pulumi.Input[str] ipv6_address: An IPv6 internal network address for this network interface. :param pulumi.Input[str] network: URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL.
For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default :param pulumi.Input[str] network_ip: An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. :param pulumi.Input['NetworkInterfaceNicType'] nic_type: The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. :param pulumi.Input[int] queue_count: The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It'll be empty if not specified by the users. :param pulumi.Input['NetworkInterfaceStackType'] stack_type: The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations. :param pulumi.Input[Sequence[pulumi.Input['NetworkInterfaceSubInterfaceArgs']]] subinterfaces: SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. :param pulumi.Input[str] subnetwork: The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region /subnetworks/subnetwork - regions/region/subnetworks/subnetwork A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide. :param pulumi.Input['NetworkRoutingConfigRoutingMode'] routing_mode: The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions. :param pulumi.Input[int] max_nodes: The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. :param pulumi.Input[int] min_nodes: The minimum number of nodes that the group should have. :param pulumi.Input['NodeGroupAutoscalingPolicyMode'] mode: The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. Time window specified for daily maintenance operations. GCE's internal maintenance will be performed within this window. :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint. :param pulumi.Input[str] authority: Optional. 
If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 :param pulumi.Input[str] endpoint: Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. :param pulumi.Input[str] payload_name: Optional. If specified, this field is used to populate the "name" field in gRPC requests. :param pulumi.Input['DurationArgs'] resend_interval: Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. :param pulumi.Input[int] retry_duration_sec: How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. :param pulumi.Input['DurationArgs'] base_ejection_time: The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. :param pulumi.Input[int] consecutive_errors: Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. :param pulumi.Input[int] consecutive_gateway_failure: The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. :param pulumi.Input[int] enforcing_consecutive_errors: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. :param pulumi.Input[int] enforcing_consecutive_gateway_failure: The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input[int] enforcing_success_rate: The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. :param pulumi.Input['DurationArgs'] interval: Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. :param pulumi.Input[int] max_ejection_percent: Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. :param pulumi.Input[int] success_rate_minimum_hosts: The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. :param pulumi.Input[int] success_rate_request_volume: The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. 
If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. :param pulumi.Input[int] success_rate_stdev_factor: This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. :param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_ranges: IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. :param pulumi.Input['PacketMirroringFilterDirection'] direction: Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. :param pulumi.Input[Sequence[pulumi.Input[str]]] ip_protocols: Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. :param pulumi.Input[str] url: Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic. :param pulumi.Input[str] url: Resource URL to the virtual machine instance which is being mirrored. :param pulumi.Input[str] url: Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored. :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoInstanceInfoArgs']]] instances: A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. :param pulumi.Input[Sequence[pulumi.Input['PacketMirroringMirroredResourceInfoSubnetInfoArgs']]] subnetworks: A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored. :param pulumi.Input[str] url: URL of the network resource. A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service is used. :param pulumi.Input['HttpRouteActionArgs'] default_route_action: defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. 
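As a rough illustration of the outlier-detection fields documented above, the sketch below builds an eviction policy that ejects a backend host after five consecutive 5xx responses while never ejecting more than half of the pool. The OutlierDetectionArgs class name and the pulumi_google_native.compute.v1 module path are assumptions; the field names and values come from the parameters listed here.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Hypothetical eviction settings for a backend service's load balancing pool.
outlier_detection = compute_v1.OutlierDetectionArgs(   # class name assumed
    consecutive_errors=5,              # 5xx responses before ejection (documented default 5)
    enforcing_consecutive_errors=100,  # always act on consecutive 5xx outliers
    max_ejection_percent=50,           # documented default is 50%
    success_rate_minimum_hosts=5,      # hosts needed before success-rate statistics apply
)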
UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction. :param pulumi.Input[str] default_service: The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect, or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use :param pulumi.Input['HttpRedirectActionArgs'] default_url_redirect: When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backend service. The HeaderAction specified here is applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap. HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[str] name: The name to which this PathMatcher is referred by the HostRule. :param pulumi.Input[Sequence[pulumi.Input['PathRuleArgs']]] path_rules: The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set. :param pulumi.Input[Sequence[pulumi.Input['HttpRouteRuleArgs']]] route_rules: The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules. A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL. :param pulumi.Input[Sequence[pulumi.Input[str]]] paths: The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /.
The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. :param pulumi.Input['HttpRouteActionArgs'] route_action: In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. :param pulumi.Input[str] service: The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. :param pulumi.Input['HttpRedirectActionArgs'] url_redirect: When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. Represents a sub PublicDelegatedPrefix. :param pulumi.Input[str] delegatee_project: Name of the project scoping this PublicDelegatedSubPrefix. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] ip_cidr_range: The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. :param pulumi.Input[bool] is_address: Whether the sub prefix is delegated to create Address resources in the delegatee project. :param pulumi.Input[str] name: The name of the sub public delegated prefix. A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer doesn't wait for responses from the shadow service. Before sending traffic to the shadow service, the host or authority header is suffixed with -shadow. :param pulumi.Input[str] backend_service: The full or partial URL to the BackendService resource being mirrored to. Specifies the reservations that this instance can consume from. :param pulumi.Input['ReservationAffinityConsumeReservationType'] consume_reservation_type: Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. :param pulumi.Input[str] key: Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of a reservation resource. This can be either a name to a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project. Represents a reservation resource. 
A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] name: The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input['ShareSettingsArgs'] share_settings: Share-settings for shared-reservation. :param pulumi.Input['AllocationSpecificSKUReservationArgs'] specific_reservation: Reservation for instances with specific machine shapes. :param pulumi.Input[bool] specific_reservation_required: Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. :param pulumi.Input[str] zone: Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment. Commitment for a particular resource (a Commitment is composed of one or more of these). :param pulumi.Input[str] accelerator_type: Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. :param pulumi.Input[str] amount: The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory for every vCPU. :param pulumi.Input['ResourceCommitmentType'] type: Type of resource for which this commitment applies. Possible values are VCPU and MEMORY. Time window specified for daily operations. :param pulumi.Input[int] days_in_cycle: Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. :param pulumi.Input[str] start_time: Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. A GroupPlacementPolicy specifies resource placement configuration. It specifies the failure bucket separation as well as network locality. :param pulumi.Input[int] availability_domain_count: The number of availability domains instances will be spread across. If two instances are in different availability domains, they will not be put in the same low latency network. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyCollocation'] collocation: Specifies network collocation. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyLocality'] locality: Specifies network locality. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyScope'] scope: Scope specifies the availability domain to which the VMs should be spread. :param pulumi.Input['ResourcePolicyGroupPlacementPolicyStyle'] style: Specifies instances to hosts placement relationship. :param pulumi.Input[int] vm_count: Number of VMs in this placement group. Time window specified for hourly operations. :param pulumi.Input[int] hours_in_cycle: Defines a schedule with units measured in hours.
The value determines how many hours pass between the start of each cycle. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. Schedule for an instance operation. :param pulumi.Input[str] schedule: Specifies the frequency for the operation, using the unix-cron format. An InstanceSchedulePolicy specifies when and how frequently certain operations are performed on the instance. :param pulumi.Input[str] expiration_time: The expiration time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] start_time: The start time of the schedule. The timestamp is an RFC3339 string. :param pulumi.Input[str] time_zone: Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_start_schedule: Specifies the schedule for starting instances. :param pulumi.Input['ResourcePolicyInstanceSchedulePolicyScheduleArgs'] vm_stop_schedule: Specifies the schedule for stopping instances. Policy for retention of scheduled snapshots. :param pulumi.Input[int] max_retention_days: Maximum age of the snapshot that is allowed to be kept. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete'] on_source_disk_delete: Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. A schedule for disks where the scheduled operations are performed. Specified snapshot properties for scheduled snapshots created by this policy. :param pulumi.Input[str] chain_name: Chain name that the snapshot is created in. :param pulumi.Input[bool] guest_flush: Indication to perform a 'guest aware' snapshot. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] storage_locations: Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyRetentionPolicyArgs'] retention_policy: Retention policy applied to snapshots created by this resource policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicyScheduleArgs'] schedule: Schedule that is applied to disks covered by this policy. :param pulumi.Input['ResourcePolicySnapshotSchedulePolicySnapshotPropertiesArgs'] snapshot_properties: Properties with which snapshots are created such as labels, encryption keys. A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. A concurrency control configuration. Defines a group config that, when attached to an instance, recognizes that instance as part of a group of instances where only up to the concurrency_limit of instances in that group can undergo simultaneous maintenance. For more information: go/concurrency-control-design-doc. A maintenance window for VMs. When set, we restrict our maintenance operations to this window.
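A minimal sketch of the instance schedule fields described above, assuming the pulumi_google_native.compute.v1 module layout. ResourcePolicyInstanceSchedulePolicyScheduleArgs is named in the parameters; the outer ResourcePolicyInstanceSchedulePolicyArgs class name and the cron expressions are illustrative assumptions.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Start VMs at 08:00 and stop them at 18:00, Monday through Friday,
# interpreted in the given tz-database time zone.
instance_schedule = compute_v1.ResourcePolicyInstanceSchedulePolicyArgs(  # outer class name assumed
    time_zone="America/New_York",
    vm_start_schedule=compute_v1.ResourcePolicyInstanceSchedulePolicyScheduleArgs(
        schedule="0 8 * * MON-FRI",   # unix-cron format, as documented
    ),
    vm_stop_schedule=compute_v1.ResourcePolicyInstanceSchedulePolicyScheduleArgs(
        schedule="0 18 * * MON-FRI",
    ),
)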
:param pulumi.Input['ResourcePolicyVmMaintenancePolicyMaintenanceWindowArgs'] maintenance_window: Maintenance windows that are applied to VMs covered by this policy. :param pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekDay'] day: Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. :param pulumi.Input[str] start_time: Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. Time window specified for weekly operations. :param pulumi.Input[Sequence[pulumi.Input['ResourcePolicyWeeklyCycleDayOfWeekArgs']]] day_of_weeks: Up to 7 intervals/windows, one for each day of the week. A rollout policy configuration. :param pulumi.Input[str] default_rollout_time: An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] location_rollout_policies: Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. Description-tagged IP ranges for the router to advertise. :param pulumi.Input[str] description: User-specified description for the IP range. :param pulumi.Input[str] range: The IP range to advertise. The value must be a CIDR-formatted string. :param pulumi.Input[int] min_receive_interval: The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. :param pulumi.Input[int] min_transmit_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. :param pulumi.Input['RouterBgpPeerBfdMode'] mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] multiplier: The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. :param pulumi.Input['RouterBgpPeerBfdPacketMode'] packet_mode: The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. 
The default is CONTROL_AND_ECHO. :param pulumi.Input['RouterBgpPeerBfdSessionInitializationMode'] session_initialization_mode: The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. :param pulumi.Input[int] slow_timer_interval: The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. :param pulumi.Input['RouterBgpPeerAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpPeerAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] advertised_route_priority: The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. :param pulumi.Input['RouterBgpPeerBfdArgs'] bfd: BFD configuration for the BGP peering. :param pulumi.Input['RouterBgpPeerEnable'] enable: The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. :param pulumi.Input[bool] enable_ipv6: Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. :param pulumi.Input[str] interface_name: Name of the interface the BGP peer is associated with. :param pulumi.Input[str] ip_address: IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] ipv6_nexthop_address: IPv6 address of the interface inside Google Cloud Platform. :param pulumi.Input[str] name: Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
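The BFD parameters above combine roughly as in the sketch below. RouterBgpPeerBfdArgs is referenced in the text; the enum-like values are passed as plain strings here, and the module path is an assumption.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# BFD session that this Cloud Router initiates, declaring the peer down
# after 5 missed packets at a 1000 ms exchange interval.
bfd = compute_v1.RouterBgpPeerBfdArgs(
    session_initialization_mode="ACTIVE",  # Cloud Router initiates the session
    min_receive_interval=1000,             # milliseconds, documented default
    min_transmit_interval=1000,
    multiplier=5,                          # missed packets before the peer is declared unavailable
)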
:param pulumi.Input[int] peer_asn: Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value. :param pulumi.Input[str] peer_ip_address: IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. :param pulumi.Input[str] peer_ipv6_nexthop_address: IPv6 address of the BGP interface outside Google Cloud Platform. :param pulumi.Input[str] router_appliance_instance: URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. :param pulumi.Input['RouterBgpAdvertiseMode'] advertise_mode: User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. :param pulumi.Input[Sequence[pulumi.Input['RouterBgpAdvertisedGroupsItem']]] advertised_groups: User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. :param pulumi.Input[Sequence[pulumi.Input['RouterAdvertisedIpRangeArgs']]] advertised_ip_ranges: User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. :param pulumi.Input[int] asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. :param pulumi.Input[int] keepalive_interval: The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. :param pulumi.Input[str] ip_range: IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. :param pulumi.Input[str] linked_interconnect_attachment: URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. :param pulumi.Input[str] linked_vpn_tunnel: URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. :param pulumi.Input[str] name: Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] private_ip_address: The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. :param pulumi.Input[str] redundant_interface: Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers. The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] subnetwork: The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. Configuration of logging on a NAT. :param pulumi.Input[bool] enable: Indicates whether or not to export logs. This is false by default. :param pulumi.Input['RouterNatLogConfigFilter'] filter: Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ips: A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_active_ranges: A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_nat_drain_ranges: A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. :param pulumi.Input['RouterNatRuleActionArgs'] action: The action to be enforced for traffic that matches this rule. :param pulumi.Input[str] description: An optional description of this rule. 
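A small sketch of the NAT logging options described above. RouterNatLogConfigArgs is named in the surrounding parameters, the filter value is passed as a plain string, and the module path is an assumption.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Export logs only for connection failures handled by this NAT.
nat_logging = compute_v1.RouterNatLogConfigArgs(
    enable=True,            # logging is off by default
    filter="ERRORS_ONLY",   # or TRANSLATIONS_ONLY / ALL, as documented
)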
:param pulumi.Input[str] match: CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" :param pulumi.Input[int] rule_number: An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. Defines the IP ranges that want to use NAT for a subnetwork. :param pulumi.Input[str] name: URL for the subnetwork resource that will use NAT. :param pulumi.Input[Sequence[pulumi.Input[str]]] secondary_ip_range_names: A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat. :param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatSourceIpRangesToNatItem']]] source_ip_ranges_to_nat: Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided. :param pulumi.Input[Sequence[pulumi.Input[str]]] drain_nat_ips: A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. :param pulumi.Input[bool] enable_dynamic_port_allocation: Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. :param pulumi.Input[int] icmp_idle_timeout_sec: Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatLogConfigArgs'] log_config: Configure logging on this NAT. :param pulumi.Input[int] max_ports_per_vm: Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. :param pulumi.Input[int] min_ports_per_vm: Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. 
This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. :param pulumi.Input[str] name: Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. :param pulumi.Input['RouterNatNatIpAllocateOption'] nat_ip_allocate_option: Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. :param pulumi.Input[Sequence[pulumi.Input[str]]] nat_ips: A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. :param pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]] rules: A list of rules associated with this NAT. :param pulumi.Input['RouterNatSourceSubnetworkIpRangesToNat'] source_subnetwork_ip_ranges_to_nat: Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. :param pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkToNatArgs']]] subnetworks: A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. :param pulumi.Input[int] tcp_established_idle_timeout_sec: Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set. :param pulumi.Input[int] tcp_time_wait_timeout_sec: Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set. :param pulumi.Input[int] tcp_transitory_idle_timeout_sec: Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set. :param pulumi.Input['RouterNatType'] type: Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC. :param pulumi.Input[int] udp_idle_timeout_sec: Timeout (in seconds) for UDP connections. Defaults to 30s if not set. This is deprecated and has no effect. Do not use. :param pulumi.Input['RuleAction'] action: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['ConditionArgs']]] conditions: This is deprecated and has no effect. Do not use. :param pulumi.Input[str] description: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input['LogConfigArgs']]] log_configs: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] not_ins: This is deprecated and has no effect. Do not use. :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: This is deprecated and has no effect. Do not use. 
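Putting the NAT fields above together, a minimal gateway that auto-allocates NAT IPs for every subnetwork might look like the sketch below. RouterNatArgs is an assumed class name; the remaining field names and option values come from the parameters documented here.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Public NAT for all IP ranges of every subnetwork in the region,
# with Google-allocated NAT IPs and error-only logging.
nat = compute_v1.RouterNatArgs(            # class name assumed
    name="nat-config",
    nat_ip_allocate_option="AUTO_ONLY",    # nat_ips must stay empty in this mode
    source_subnetwork_ip_ranges_to_nat="ALL_SUBNETWORKS_ALL_IP_RANGES",
    min_ports_per_vm=64,                   # rounded up to a power of 2 in any case
    log_config=compute_v1.RouterNatLogConfigArgs(enable=True, filter="ERRORS_ONLY"),
)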
:param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input['SSLHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields. :param pulumi.Input['SSLHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. :param pulumi.Input[str] request: The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. An instance-attached disk resource. :param pulumi.Input[str] source_disk: Specifies a URL of the disk attached to the source instance. Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled. :param pulumi.Input[str] key: Corresponds to the label key of Node resource. :param pulumi.Input['SchedulingNodeAffinityOperator'] operator: Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. :param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of Node resource. Sets the scheduling options for an Instance. NextID: 21 :param pulumi.Input[bool] automatic_restart: Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. :param pulumi.Input[int] availability_domain: Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. :param pulumi.Input[int] current_cpus: Current number of vCPUs available for VM. 0 or unset means default vCPUs of the current machine type. :param pulumi.Input[str] current_memory_mb: Current amount of memory (in MB) available for VM. 0 or unset means default amount of memory of the current machine type. :param pulumi.Input[int] host_error_timeout_seconds: Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. 
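For the SSL health-check fields above, a fixed-port probe could be sketched as follows. SSLHealthCheckArgs is an assumed class name and the enum values are passed as the documented strings.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Probe TCP/443 over SSL; leaving request and response empty means a
# successful handshake alone counts as healthy.
ssl_check = compute_v1.SSLHealthCheckArgs(   # class name assumed
    port=443,
    port_specification="USE_FIXED_PORT",
    proxy_header="NONE",
)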
:param pulumi.Input['SchedulingInstanceTerminationAction'] instance_termination_action: Specifies the termination action for the instance. :param pulumi.Input[bool] latency_tolerant: Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set. :param pulumi.Input[str] location_hint: An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. :param pulumi.Input['SchedulingMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals. :param pulumi.Input['DurationArgs'] max_run_duration: Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration. :param pulumi.Input[int] min_node_cpus: The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. :param pulumi.Input[Sequence[pulumi.Input['SchedulingNodeAffinityArgs']]] node_affinities: A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. :param pulumi.Input['SchedulingOnHostMaintenance'] on_host_maintenance: Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options. :param pulumi.Input[bool] preemptible: Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. :param pulumi.Input['SchedulingProvisioningModel'] provisioning_model: Specifies the provisioning model of the instance. :param pulumi.Input[str] termination_time: Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time. [Deprecated] The configuration to access the SDS server. The configuration to access the SDS server. :param pulumi.Input['GrpcServiceConfigArgs'] grpc_service_config: The configuration to access the SDS server over GRPC. Configuration options for L7 DDoS detection. :param pulumi.Input[bool] enable: If set to true, enables CAAP for L7 DDoS detection. :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigRuleVisibility'] rule_visibility: Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. Configuration options for Cloud Armor Adaptive Protection (CAAP). :param pulumi.Input['SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigArgs'] layer7_ddos_defense_config: If set to true, enables Cloud Armor Machine Learning. :param pulumi.Input[str] attachment_id: The resource that the security policy is attached to. :param pulumi.Input[str] name: The name for an association. Configuration options for Cloud Armor. :param pulumi.Input[bool] enable_ml: If set to true, enables Cloud Armor Machine Learning. 
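As a sketch of the scheduling fields above, the following keeps the default restart and maintenance behaviour for a standard instance and pins it to sole-tenant nodes carrying a given label. SchedulingArgs is an assumed class name, SchedulingNodeAffinityArgs is named in the parameters, and the label key and values are hypothetical.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Standard (non-preemptible) instance restricted to nodes labelled
# workload=batch, live-migrated during host maintenance.
scheduling = compute_v1.SchedulingArgs(      # class name assumed
    automatic_restart=True,
    on_host_maintenance="MIGRATE",
    preemptible=False,
    node_affinities=[
        compute_v1.SchedulingNodeAffinityArgs(
            key="workload",                  # hypothetical node label key
            operator="IN",                   # IN = affinity, NOT_IN = anti-affinity
            values=["batch"],
        ),
    ],
)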
:param pulumi.Input[str] redirect_site_key: An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. :param pulumi.Input[str] header_name: The name of the header to set. :param pulumi.Input[str] header_value: The value to set the named header to. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleHttpHeaderActionHttpHeaderOptionArgs']]] request_headers_to_adds: The list of request headers to add or overwrite if they're already present. :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. :param pulumi.Input[Sequence[pulumi.Input[str]]] ports: An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input[str]]] dest_ip_ranges: CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigDestinationPortArgs']]] dest_ports: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input['SecurityPolicyRuleMatcherConfigLayer4ConfigArgs']]] layer4_configs: Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[Sequence[pulumi.Input[str]]] src_ip_ranges: CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified. :param pulumi.Input['SecurityPolicyRuleMatcherConfigArgs'] config: The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. :param pulumi.Input['ExprArgs'] expr: User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. 
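The matcher fields above combine roughly as below for a FIREWALL-style match on TCP ports 80 and 443 from two source ranges. Both Args class names appear in the parameter list; the module path and the CIDR values are assumptions.

import pulumi_google_native.compute.v1 as compute_v1  # module path is an assumption

# Match TCP 80/443 from two CIDR source ranges (used with versioned_expr FIREWALL).
matcher_config = compute_v1.SecurityPolicyRuleMatcherConfigArgs(
    src_ip_ranges=["10.0.0.0/8", "192.168.0.0/16"],
    layer4_configs=[
        compute_v1.SecurityPolicyRuleMatcherConfigLayer4ConfigArgs(
            ip_protocol="tcp",
            ports=["80", "443"],
        ),
    ],
)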
:param pulumi.Input['SecurityPolicyRuleMatcherVersionedExpr'] versioned_expr: Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config. :param pulumi.Input[int] count: Number of HTTP(S) requests for calculating the threshold. :param pulumi.Input[int] interval_sec: Interval over which the threshold is computed. :param pulumi.Input[int] ban_duration_sec: Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] ban_threshold: Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'. :param pulumi.Input[str] conform_action: Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsEnforceOnKey'] enforce_on_key: Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. :param pulumi.Input[str] enforce_on_key_name: Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. :param pulumi.Input[str] exceed_action: Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceed_redirect_options below. :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] exceed_redirect_options: Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsThresholdArgs'] rate_limit_threshold: Threshold at which to begin ratelimiting. :param pulumi.Input[str] target: Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. 
:param pulumi.Input['SecurityPolicyRuleRedirectOptionsType'] type: Type of the redirect action. Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny). :param pulumi.Input[str] action: The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input['SecurityPolicyRuleDirection'] direction: The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. :param pulumi.Input[bool] enable_logging: Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. This field may only be specified when the versioned_expr is set to FIREWALL. :param pulumi.Input['SecurityPolicyRuleHttpHeaderActionArgs'] header_action: Optional, additional actions that are performed on headers. :param pulumi.Input['SecurityPolicyRuleMatcherArgs'] match: A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. :param pulumi.Input[bool] preview: If set to true, the specified action is not enforced. :param pulumi.Input[int] priority: An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. :param pulumi.Input['SecurityPolicyRuleRateLimitOptionsArgs'] rate_limit_options: Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. :param pulumi.Input['SecurityPolicyRuleRedirectOptionsArgs'] redirect_options: Parameters defining the redirect action. Cannot be specified for any other actions. :param pulumi.Input[str] redirect_target: This must be specified for redirect actions. Cannot be specified for any other actions. :param pulumi.Input[str] rule_number: Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation, if rule number is not specified it will be generated by the server. :param pulumi.Input[Sequence[pulumi.Input[str]]] target_resources: A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_service_accounts: A list of service accounts indicating the sets of instances that are applied with this rule.
The authentication and authorization settings for a BackendService.
:param pulumi.Input[str] client_tls_policy: Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.
The TLS settings for the server.
:param pulumi.Input['TlsContextArgs'] proxy_tls_context: Configures the mechanism to obtain security certificates and identity information.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alt_names: A list of alternate names to verify the subject identity in the certificate presented by the client.
:param pulumi.Input['ServerTlsSettingsTlsMode'] tls_mode: Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: - SIMPLE Secure connections with standard TLS semantics. - MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication.
A service account.
:param pulumi.Input[str] email: Email address of the service account.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: The list of scopes to be made available for this service account.
:param pulumi.Input[int] connection_limit: The value of the limit to set.
:param pulumi.Input[str] project_id_or_num: The project id or number for the project to set the limit for.
The share setting for reservations and sole tenancy node groups.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] folder_map: A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. The folder id should be a string of numbers, without the "folders/" prefix.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] project_map: A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS.
:param pulumi.Input[Sequence[pulumi.Input[str]]] projects: A list of project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS.
:param pulumi.Input['ShareSettingsShareType'] share_type: Type of sharing for this shared-reservation A set of Shielded Instance options. :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. Enabled by default. :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Disabled by default. :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. Enabled by default. The policy describes the baseline against which Instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. A set of Shielded VM options. :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. :param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled. The policy describes the baseline against which VM instance boot integrity is measured. :param pulumi.Input[bool] update_auto_learn_policy: Updates the integrity policy baseline using the measurements from the VM instance's most recent boot. :param pulumi.Input['CustomerEncryptionKeyArgs'] disk_encryption_key: The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. :param pulumi.Input[str] source_disk: URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk A specification of the parameters to use when creating the instance template from a source instance. :param pulumi.Input[Sequence[pulumi.Input['DiskInstantiationConfigArgs']]] disk_configs: Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. Configuration and status of a managed SSL certificate. :param pulumi.Input[Sequence[pulumi.Input[str]]] domains: The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). Configuration and status of a self-managed SSL certificate. :param pulumi.Input[str] certificate: A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. :param pulumi.Input[str] private_key: A write-only private key in PEM format. Only insert requests will include this field. Configuration of preserved resources. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] disks: Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] external_ips: External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] internal_ips: Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. The available logging options for this subnetwork. :param pulumi.Input['SubnetworkLogConfigAggregationInterval'] aggregation_interval: Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. :param pulumi.Input[bool] enable: Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. :param pulumi.Input[str] filter_expr: Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. :param pulumi.Input[float] flow_sampling: Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. :param pulumi.Input['SubnetworkLogConfigMetadata'] metadata: Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. :param pulumi.Input[Sequence[pulumi.Input[str]]] metadata_fields: Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. Represents a secondary IP range of a subnetwork. :param pulumi.Input[str] ip_cidr_range: The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. :param pulumi.Input[str] range_name: The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. :param pulumi.Input[str] reserved_internal_range: The URL of the reserved internal range. Subsetting configuration for this BackendService. Currently this is applicable only for Internal TCP/UDP load balancing, Internal HTTP(S) load balancing and Traffic Director. :param pulumi.Input[int] subset_size: The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. 
Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled.
:param pulumi.Input[int] port: The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.
:param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.
:param pulumi.Input['TCPHealthCheckPortSpecification'] port_specification: Specifies how port is selected for health checking, and can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.
:param pulumi.Input['TCPHealthCheckProxyHeader'] proxy_header: Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.
:param pulumi.Input[str] request: The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.
:param pulumi.Input[str] response: The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.
A set of instance tags.
:param pulumi.Input[Sequence[pulumi.Input[str]]] items: An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.
[Deprecated] Defines the mechanism to obtain the client or server certificate. Defines the mechanism to obtain the client or server certificate.
:param pulumi.Input['TlsCertificatePathsArgs'] certificate_paths: Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH.
:param pulumi.Input['TlsCertificateContextCertificateSource'] certificate_source: Defines how TLS certificates are obtained.
:param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS.
[Deprecated] The paths to the mounted TLS Certificates and private key. The paths to the mounted TLS Certificates and private key.
:param pulumi.Input[str] certificate_path: The path to the file holding the client or server TLS certificate to use.
:param pulumi.Input[str] private_key_path: The path to the file holding the client or server private key.
[Deprecated] The TLS settings for the client or server. The TLS settings for the client or server.
:param pulumi.Input['TlsCertificateContextArgs'] certificate_context: Defines the mechanism to obtain the client or server certificate.
:param pulumi.Input['TlsValidationContextArgs'] validation_context: Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate.
[Deprecated] Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate.
:param pulumi.Input[str] certificate_path: The path to the file holding the CA certificate to validate the client or server certificate. :param pulumi.Input['SdsConfigArgs'] sds_config: Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. :param pulumi.Input['TlsValidationContextValidationSource'] validation_source: Defines how TLS certificates are obtained. :param pulumi.Input[int] port: The UDP port number for the health check request. Valid values are 1 through 65535. :param pulumi.Input[str] port_name: Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. :param pulumi.Input[str] request: Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII. :param pulumi.Input[str] response: The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. HTTP headers used in UrlMapTests. :param pulumi.Input[str] name: Header name. :param pulumi.Input[str] value: Header value. Message for the expected URL mappings. :param pulumi.Input[int] backend_service_weight: The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. :param pulumi.Input[str] description: Description of this test case. :param pulumi.Input[str] expected_output_url: The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. :param pulumi.Input[int] expected_redirect_response_code: For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. :param pulumi.Input[Sequence[pulumi.Input['UrlMapTestHeaderArgs']]] headers: HTTP headers for this request. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] host: Host portion of the URL. If headers contains a host header, then host must also match the header value. :param pulumi.Input[str] path: Path portion of the URL. :param pulumi.Input[str] service: Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. The spec for modifying the path before sending the request to the matched backend service. :param pulumi.Input[str] host_rewrite: Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. 
:param pulumi.Input[str] path_prefix_rewrite: Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. A VPN gateway interface. :param pulumi.Input[str] interconnect_attachment: URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple backend services. The volume of traffic for each backend service is proportional to the weight specified in each WeightedBackendService :param pulumi.Input[str] backend_service: The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. :param pulumi.Input['HttpHeaderActionArgs'] header_action: Specifies changes to request and response headers that need to take effect for the selected backendService. headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. :param pulumi.Input[int] weight: Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000. The specification for how client requests are aborted as part of fault injection. The number of the guest accelerator cards exposed to this instance. Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types. Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access. The Action to perform when the client connection triggers the rule. Can currently be either "allow" or "deny()" where valid values for status are 403, 404, and 502. The action to be enforced for traffic that matches this rule. This is deprecated and has no effect. Do not use. The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). 
- rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. Controls for advanced machine-related behavior features. Note that for MachineImage, this is not supported yet. User-specified flag to indicate which mode to use for advertisement. User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM. User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups. User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the "bgp" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges. The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win. Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection. An array of alias IP ranges for this network interface. You can only specify this field for network interfaces in VPC networks. In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. Default is false. Specifies the content for the Access-Control-Allow-Headers header. Specifies the content for the Access-Control-Allow-Methods header. Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. 
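The rule "action" values and rate-limiting parameters described above (rate_based_ban, throttle, enforce_on_key, conform/exceed actions) combine roughly as in the sketch below. The `SecurityPolicyRuleArgs` name and the module path are assumptions; the nested `SecurityPolicyRuleMatcherArgs`, `SecurityPolicyRuleMatcherConfigArgs`, `SecurityPolicyRuleRateLimitOptionsArgs`, and `SecurityPolicyRuleRateLimitOptionsThresholdArgs` types and all field names are the ones documented earlier in this file.

```python
# Hypothetical sketch of a throttle rule keyed on client IP
# (SecurityPolicyRuleArgs and the module path are assumed).
import pulumi_google_native.compute.v1 as compute

throttle_rule = compute.SecurityPolicyRuleArgs(
    action="throttle",                  # requires rate_limit_options, per the action list above
    priority=1000,                      # lower value = higher priority
    match=compute.SecurityPolicyRuleMatcherArgs(
        versioned_expr="SRC_IPS_V1",    # SRC_IPS_V1 requires src_ip_ranges in config
        config=compute.SecurityPolicyRuleMatcherConfigArgs(
            src_ip_ranges=["203.0.113.0/24"],
        ),
    ),
    rate_limit_options=compute.SecurityPolicyRuleRateLimitOptionsArgs(
        conform_action="allow",         # "allow" is the only valid conform action
        exceed_action="deny(429)",      # 403, 404, 429 and 502 are the valid deny statuses
        enforce_on_key="IP",            # enforce the threshold per source IP
        rate_limit_threshold=compute.SecurityPolicyRuleRateLimitOptionsThresholdArgs(
            count=100,
            interval_sec=60,            # i.e. 100 requests per 60 s per IP
        ),
    ),
)
```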
Specifies the list of origins that are allowed to make CORS requests. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. The number of licenses purchased. The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5 GB of memory per vCPU. Application name to be used in the OAuth consent screen. The architecture of the attached disk. Valid values are arm64 or x86_64. Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN. The resource that the security policy is attached to. The target that the firewall policy is attached to. The configuration for logging of each type of permission. Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 This is deprecated and has no effect. Do not use. Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance). Restricts what triggers autohealing. Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted. By default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine. Specifies the availability domain (AD) that this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1 and the max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. The number of availability domains that instances will be spread across. If two instances are in different availability domains, they will not be put in the same low-latency network. The full or partial URL to the BackendService resource being mirrored to. The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting. Specifies how to determine whether the backend of a load balancer can handle additional traffic or is fully loaded. For usage guidelines, see Connection balancing mode. Backends must use compatible balancing modes. For more information, see Supported balancing modes and target capacity settings and Restrictions and guidance for instance groups. Note: Currently, if you use the API to configure incompatible balancing modes, the configuration might be accepted even though it has no impact and is ignored. Specifically, Backend.maxUtilization is ignored when Backend.balancingMode is RATE. In the future, this incompatible combination will be rejected. Can only be specified if the action for the rule is "rate_based_ban".
If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. Can only be specified if the action for the rule is "rate_based_ban". If specified, the key will be banned for the configured 'ban_duration_sec' when the number of requests that exceed the 'rate_limit_threshold' also exceed this 'ban_threshold'. The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s. BFD configuration for the BGP peering. This is deprecated and has no effect. Do not use. Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem. Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. The CacheKeyPolicy for this CdnPolicy. The CacheKeyPolicy for this CdnPolicy. Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS Requires the origin to set valid caching headers to cache content. Responses without these headers will not be cached at Google's edge, and will require a full trip to the origin on every request, potentially impacting performance and increasing load on the origin server. FORCE_CACHE_ALL Cache all content, ignoring any "private", "no-store" or "no-cache" directives in Cache-Control response headers. Warning: this may result in Cloud CDN caching private, per-user (user identifiable) content. CACHE_ALL_STATIC Automatically cache static content, including common image formats, media (video and audio), and web assets (JavaScript and CSS). Requests and responses that are marked as uncacheable, as well as dynamic content (including HTML), will not be cached. The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: - GCE_VM: The local GCE VM service account credentials are used to access the SDS server. - FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server. The call credentials to access the SDS server. Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. 
If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information. A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert. Defines the mechanism to obtain the client or server certificate. The path to the file holding the client or server TLS certificate to use. The path to the file holding the CA certificate to validate the client or server certificate. Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH. Defines how TLS certificates are obtained. The call credentials to access the SDS server. Chain name that the snapshot is created in. The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server. The channel credentials to access the SDS server. IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. Name of the client to be generated. Optional - If not provided, the name will be autogenerated by the backend. Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact. Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. With FORCE_CACHE_ALL, the lesser of client_ttl and default_ttl is used for the response max-age directive, along with a "public" directive. 
For cacheable content in CACHE_ALL_STATIC mode, client_ttl clamps the max-age from the origin (if specified), or else sets the response max-age directive to the lesser of the client_ttl and default_ttl, and also ensures a "public" cache-control directive is present. If a client TTL is not specified, a default value (1 hour) will be used. The maximum allowed value is 31,622,400s (1 year). This is deprecated and has no effect. Do not use. The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 302, 307, 308, 404, 405, 410, 421, 451 and 501 can be specified as values, and you cannot specify a status code more than once. Specifies network collocation. The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). This is deprecated and has no effect. Do not use. Specifies the Confidential Instance options. Note that for MachineImage, this is not supported yet. The configuration needed to enable the networkservices.HttpFilter resource. The configuration must be YAML formatted and only contain fields defined in the protobuf identified in configTypeUrl. The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified. The fully qualified versioned proto3 type url of the protobuf that the filter expects for its contextual settings, for example: type.googleapis.com/google.protobuf.Struct. Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only. The timeout for new network connections to hosts. The value of the limit to set. Specifies connection persistence when backends are unhealthy. The default value is DEFAULT_FOR_PROTOCOL. If set to DEFAULT_FOR_PROTOCOL, the existing connections persist on unhealthy backends only for connection-oriented protocols (TCP and SCTP) and only if the Tracking Mode is PER_CONNECTION (default tracking mode) or the Session Affinity is configured for 5-tuple. They do not persist for UDP. If set to NEVER_PERSIST, after a backend becomes unhealthy, the existing connections on the unhealthy backend are never persisted on the unhealthy backend. They are always diverted to newly selected healthy backends (unless all backends are unhealthy). If set to ALWAYS_PERSIST, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. For more details, see [Connection Persistence for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#connection-persistence) and [Connection Persistence for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#connection-persistence). Number of errors before a host is ejected from the connection pool.
When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples. The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. The raw content in the secure keys file. The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process. Specifies the core range of the instance for which this license applies. The specification for allowing client-side cross-origin requests. For more information about the W3C recommendation for cross-origin resource sharing (CORS), see Fetch API Living Standard. Not supported when the URL map is bound to a target gRPC proxy. Specifies the number of resources that are allocated. Number of HTTP(S) requests for calculating the threshold. This is deprecated and has no effect. Do not use. Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Current number of vCPUs available for VM. 0 or unset means default vCPUs of the current machine type. Current amount of memory (in MB) available for VM. 0 or unset means default amount of memory of the current machine type. This is deprecated and has no effect. Do not use. The custom source image to be used to restore this disk when instantiating this instance template. Configuration parameters of autoscaling based on a custom metric. This is deprecated and has no effect. Do not use. Defines a schedule that runs on specific days of the week. Specify one or more days. The following options are available: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. Up to 7 intervals/windows, one for each day of the week. Defines a schedule with units measured in months. The value determines how many months pass between the start of each cycle. The Key Database (db). The forbidden key database (dbx). An optional RFC3339 timestamp on or after which the update is considered rolled out to any zone that is not explicitly stated. defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction. 
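To make the defaultRouteAction / weightedBackendServices relationship above concrete, here is a minimal traffic-splitting sketch. The `HttpRouteActionArgs` and `WeightedBackendServiceArgs` class names and the module path are assumptions inferred from the HttpRouteAction and WeightedBackendService prose earlier in this file; the `backend_service` and `weight` fields are the ones documented there.

```python
# Hypothetical sketch: split 90% / 10% of matching traffic between two
# backend services (class names and module path are assumed).
import pulumi_google_native.compute.v1 as compute

default_route_action = compute.HttpRouteActionArgs(
    weighted_backend_services=[
        compute.WeightedBackendServiceArgs(
            backend_service="global/backendServices/app-stable",  # hypothetical backend
            weight=900,   # weights are relative; valid range is 0 to 1000
        ),
        compute.WeightedBackendServiceArgs(
            backend_service="global/backendServices/app-canary",  # hypothetical backend
            weight=100,
        ),
    ],
)
```

As noted above, a path matcher whose defaultRouteAction specifies weightedBackendServices must not also set defaultService.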
The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). Setting a TTL of "0" means "always revalidate". The value of defaultTTL cannot be set to a value greater than that of maxTTL, but can be equal. When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set. Not supported when the URL map is bound to a target gRPC proxy. The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. Name of the project scoping this PublicDelegatedSubPrefix. An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it. An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it. An optional description. Provide this property when creating the disk. An optional description of this resource. Provide this property when you create the resource. Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. An optional description for this resource. An optional description of this resource. Provide this property when you create the resource. The short description conveying the intent of this routeRule. 
The description can have a maximum length of 1024 characters. An optional text description for the instances that are created from these properties. An optional description of this resource. Provide this property when you create the resource. An optional description of this resource. Provide this property when you create the resource. An optional description of this resource. Provide this property when you create the resource. User-specified description for the IP range. An optional description of this rule. This is deprecated and has no effect. Do not use. An optional description of this resource. Provide this property when you create the resource. Description of this test case. Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination FQDNs allowed is 1000. CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000. CIDR IP address range. This field may only be specified when versioned_expr is set to FIREWALL. Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as a 2-letter country code as defined in ISO 3166 alpha-2, e.g. "US". Maximum number of destination region codes allowed is 5000. Developer's information to be used in the OAuth consent screen. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. Specifies the device name of the disk to which the configurations apply. The direction in which this rule applies. Direction of traffic to mirror, either INGRESS, EGRESS, or BOTH. The default is BOTH. The direction in which this rule applies. This field may only be specified when versioned_expr is set to FIREWALL. This can be set to true only if the protocol is TCP. The default is false. If true, the setting specifies that the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled. Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes. Specifies the number of such disks. Encrypts or decrypts a disk using a customer-supplied encryption key. If you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.
If you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance. If you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. Instance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group. The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key. Specifies the disk name. If not specified, the default is to use the name of the instance. If a disk with the same name already exists in the given region, the existing disk is attached to the new instance and the new disk is not created. Specifies the size of the disk in base-2 GB. Specifies the size of the disk in base-2 GB. The size must be at least 10 GB. If you specify a sourceImage, which is required for boot disks, the default size is the size of the sourceImage. If you do not specify a sourceImage, the default disk size is 500 GB. The size of the disk in GB. Specifies the size of the disk in base-2 GB. Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL. Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL. An array of disks that are associated with the instances that are created from these properties. Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks. Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer Note that for MachineImage, this is not supported yet. The domains for which a managed SSL certificate will be generated. Each Google-managed SSL certificate supports up to the [maximum number of domains per Google-managed SSL certificate](/load-balancing/docs/quotas#ssl_certificates). A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only. Configures a duration timeout for existing requests on a removed backend instance. For supported load balancers and protocols, as described in Enabling connection draining. If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false. Email address of the service account. This field denotes whether to enable logging for the load balancer traffic served by this backend service. This field denotes whether to enable logging for a particular firewall rule. Indicates whether or not to export logs. This is false by default, which means no health check logging will be done. The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE. Indicates whether or not to export logs. This is false by default. If set to true, enables CAAP for L7 DDoS detection. Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Defines whether the instance should have confidential compute enabled. Defines whether the instance has Display enabled. Enable Dynamic Port Allocation. If not specified, it is disabled by default. If set to true, - Dynamic Port Allocation will be enabled on this NAT config. - enableEndpointIndependentMapping cannot be set to true. - If minPorts is set, minPortsPerVm must be set to a power of two greater than or equal to 32. If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. Defines whether the instance has integrity monitoring enabled. Enabled by default. Defines whether the instance has integrity monitoring enabled. Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on "goto_next" rules. This field may only be specified when the versioned_expr is set to FIREWALL. If set to true, enables Cloud Armor Machine Learning. Whether to enable nested virtualization or not (default is false). Defines whether the instance has Secure Boot enabled. Disabled by default. Defines whether the instance has Secure Boot enabled. Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. Whether to enable UEFI networking for instance creation. Defines whether the instance has the vTPM enabled. Enabled by default. Defines whether the instance has the vTPM enabled. Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty. Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name. Determines the key to enforce the rate_limit_threshold on. 
Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key type defaults to ALL. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforce_on_key_name". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. The value should exactly match contents of exactMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch. Only one of presentMatch, exactMatch, or regexMatch must be set. Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceed_redirect_options below. Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This is deprecated and has no effect. Do not use. Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members. The expected output URL evaluated by the load balancer containing the scheme, host, path and query parameters. For rules that forward requests to backends, the test passes only when expectedOutputUrl matches the request forwarded by the load balancer to backends. For rules with urlRewrite, the test verifies that the forwarded request matches hostRewrite and pathPrefixRewrite in the urlRewrite action. When service is specified, expectedOutputUrl`s scheme is ignored. For rules with urlRedirect, the test passes only if expectedOutputUrl matches the URL in the load balancer's redirect response. If urlRedirect specifies https_redirect, the test passes only if the scheme in expectedOutputUrl is also set to HTTPS. 
If urlRedirect specifies strip_query, the test passes only if expectedOutputUrl does not contain any query parameters. expectedOutputUrl is optional when service is specified. For rules with urlRedirect, the test passes only if expectedRedirectResponseCode matches the HTTP status code in load balancer's redirect response. expectedRedirectResponseCode cannot be set when service is set. The expiration time of the schedule. The timestamp is an RFC3339 string. Specifies the content for the Access-Control-Expose-Headers header. User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Textual representation of an expression in Common Expression Language syntax. External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically. The prefix length of the external IPv6 range. If set to true, the Interconnect will be configured with a should-secure MACsec security policy, that allows the Google router to fallback to cleartext traffic if the MKA session cannot be established. By default, the Interconnect will be configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router. This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService. The value of the field must be in the range [0, 1]. If the value is 0, the load balancer performs a failover when the number of healthy primary VMs equals zero. For all other values, the load balancer performs a failover when the total number of healthy primary VMs is less than this ratio. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. For the requests impacted by fault injection, timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy. This is deprecated and has no effect. Do not use. The file type of source file. A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. For the filter to be valid for autoscaling purposes, the following rules apply: - You can only use the AND operator for joining selectors. - You can only use direct equality comparison operator (=) without any functions for each selector. - You can specify the metric in both the filter string and in the metric field. 
However, if specified in both places, the metric must be identical. - The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a *per-group metric* for the purpose of autoscaling. If not specified, the type defaults to gce_instance. Try to provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. Specify the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT. This option can take one of the following values: - ERRORS_ONLY: Export logs only for connection failures. - TRANSLATIONS_ONLY: Export logs only for successful connections. - ALL: Export logs for all connections, successful and unsuccessful. Can only be specified if VPC flow logs for this subnetwork is enabled. Export filter used to define which VPC flow logs should be logged. The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries. Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. Name of the networkservices.HttpFilter resource this configuration belongs to. This name must be known to the xDS client. Example: envoy.wasm Specifies a fixed number of VM instances. This must be a positive integer. Specifies the value of the fixed delay interval. Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 unless otherwise specified by the org policy, which means half of all collected logs are reported. A map of folder id and folder config to specify consumer projects for this shared-reservation. This is only valid when share_type's value is DIRECT_PROJECTS_UNDER_SPECIFIC_FOLDERS. Folder id should be a string of number, and without "folders/" prefix. [Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error. Custom authenticator credentials. Valid if callCredentialType is FROM_PLUGIN. For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL. fullPathMatch must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. 
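To make the path-matching and metadata-filter fields described above concrete, here is a minimal, hypothetical sketch of a URL map routeRules entry expressed as a Python dict in the shape the Compute Engine REST API expects. The backend service URL, priority, and label values are placeholders, not values taken from this document:

```python
import json

# Hypothetical routeRules entry for a UrlMap pathMatcher.
# Only one of prefixMatch, fullPathMatch or regexMatch may be set per matchRule.
route_rule = {
    "priority": 1,
    "matchRules": [
        {
            # Must exactly match the request path (query parameters and anchor removed).
            "fullPathMatch": "/api/v1/status",
        },
        {
            # Alternatively, match on a path prefix.
            "prefixMatch": "/api/",
            # Restrict this rule to xDS clients whose node metadata carries
            # at least one of the labels below (MATCH_ANY semantics).
            "metadataFilters": [
                {
                    "filterMatchCriteria": "MATCH_ANY",
                    "filterLabels": [{"name": "env", "value": "prod"}],
                }
            ],
        },
    ],
    # Placeholder backend service URL.
    "service": "projects/my-project/global/backendServices/my-backend",
}

print(json.dumps(route_rule, indent=2))
```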
A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: "func1". The fully-qualified URL of an instance group or network endpoint group (NEG) resource. To determine what types of backends a load balancer supports, see the [Backend services overview](https://cloud.google.com/load-balancing/docs/backend-service#backends). You must use the *fully-qualified* URL (starting with https://www.googleapis.com/) to specify the instance group or NEG. Partial URLs are not supported. The configuration to access the SDS server over GRPC. The gRPC service name for the health check. This field is optional. The value of grpc_service_name has the following meanings by convention: - Empty service_name means the overall status of all services at the backend. - Non-empty service_name means the health of that gRPC service, as defined by the owner of the service. The grpc_service_name can only be ASCII. Specifies accelerator type and count. A list of guest accelerator cards' type and count to use for instances created from these properties. Indication to perform a 'guest aware' snapshot. A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Guest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options. Specifies changes to request and response headers that need to take effect for the selected backendService. The headerAction value specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. Specifies changes to request and response headers that need to take effect for the selected backend service. HeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap HeaderAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. Optional, additional actions that are performed on headers. Specifies changes to request and response headers that need to take effect for the selected backendService. headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. Specifies a list of header match criteria, all of which must match corresponding headers in the request. The header field name to match on when bypassing cache. Values are case-insensitive. The header field name to match on when bypassing cache. Values are case-insensitive. The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". 
For matching a request's method, use the headerName ":method". When the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin`. The name of the header. The name of the header to set. The value of the header to add. The value to set the named header to. HTTP headers for this request. If headers contains a host header, then host must also match the header value. The URL for the health check that signals autohealing. The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used. Host portion of the URL. If headers contains a host header, then host must also match the header value. Specify the time in seconds for host error detection, the value must be within the range of [90, 330] with the increment of 30, if unset, the default behavior of host error recovery will be used. The host that is used in the redirect response instead of the one that was supplied in the request. The value must be from 1 to 255 characters. Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. The value must be from 1 to 255 characters. The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or .. * based matching is not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. Defines a schedule with units measured in hours. The value determines how many hours pass between the start of each cycle. Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Outbound route specific configuration for networkservices.HttpFilter resources enabled by Traffic Director. httpFilterConfigs only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. Outbound route specific metadata supplied to networkservices.HttpFilter resources enabled by Traffic Director. httpFilterMetadata only applies for load balancers with loadBalancingScheme set to INTERNAL_SELF_MANAGED. See ForwardingRule for more details. 
The only configTypeUrl supported is type.googleapis.com/google.protobuf.Struct Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD. The HTTP status code used to abort the request. The value must be from 200 to 599 inclusive. For gRPC protocol, the gRPC status code is mapped to HTTP status code according to this mapping table. HTTP status 200 is mapped to gRPC status UNKNOWN. Injecting an OK status is currently not supported by Traffic Director. If set to true, the URL scheme in the redirected request is set to HTTPS. If set to false, the URL scheme of the redirected request remains the same as that of the request. This must only be set for URL maps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted. The default is set to false. This is deprecated and has no effect. Do not use. Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly. Specifies that prefixMatch and fullPathMatch matches are case sensitive. The default value is false. ignoreCase must not be used with regexMatch. Not supported when the URL map is bound to a target gRPC proxy. This is deprecated and has no effect. Do not use. If true, requests to different hosts will be cached separately. Allows HTTP request headers (by name) to be used in the cache key. Allows HTTP request headers (by name) to be used in the cache key. Allows HTTP cookies (by name) to be used in the cache key. The name=value pair will be used in the cache key Cloud CDN generates. If true, http and https requests will be cached separately. If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely. The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600]. [Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both. This is deprecated and has no effect. Do not use. The instance properties for the reservation. 
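As a concrete illustration of the disk-related fields above (sourceImage, diskSizeGb, diskType, deviceName, initializeParams), here is a minimal sketch of the disks portion of an instance creation request body. The image family, zone, and size are placeholder values:

```python
import json

# Hypothetical boot-disk spec using initializeParams; the new disk is created
# alongside the instance (mutually exclusive with attaching an existing source).
boot_disk = {
    "boot": True,
    "autoDelete": True,
    "deviceName": "boot",  # reflected under /dev/disk/by-id/google-* in the guest
    "initializeParams": {
        "sourceImage": "projects/debian-cloud/global/images/family/debian-12",
        "diskSizeGb": "20",  # base-2 GB; boot disks default to the image size
        "diskType": "zones/us-central1-a/diskTypes/pd-standard",
        "labels": {"purpose": "demo"},
    },
}

instance_body_fragment = {"disks": [boot_disk]}
print(json.dumps(instance_body_fragment, indent=2))
```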
Properties of the SKU instances being reserved. The instance redistribution policy for regional managed instance groups. Valid values are: - PROACTIVE (default): The group attempts to maintain an even distribution of VM instances across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached. The templates for existing instances in the group do not change unless you run recreateInstances, run applyUpdatesToInstances, or set the group's updatePolicy.type to PROACTIVE; in those cases, existing instances are updated until the `targetSize` for this version is reached. Specifies the termination action for the instance. A set of virtual machine instances that are being mirrored. They must live in zones contained in the same region as this packetMirroring. Note that this config will apply only to those network interfaces of the Instances that belong to the network specified in this packetMirroring. You may specify a maximum of 50 Instances. Specifies whether to include the disk and what image to use. Possible values are: - source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. - custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. - attach-read-only: to attach a read-only disk. Applicable to read-only disks. - do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks. URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner's portal. For instance "Chicago 1". This value may be validated to match approved Partner values. Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance. Name of the interface the BGP peer is associated with. Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. The prefix length of the primary internal IPv6 range. Time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service. Defaults to 1 second. Interval over which the threshold is computed. 
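Pulling together the rate-limit fields described earlier (enforce_on_key, the exceed action, and the threshold interval mentioned just above), the following is a hypothetical sketch of a Cloud Armor security policy rule body. The priority, counts, and source range are made-up example values:

```python
import json

# Hypothetical rate-based security policy rule: allow conforming traffic and
# return HTTP 429 once a single client IP exceeds 100 requests per 60 seconds.
rate_limit_rule = {
    "priority": 1000,
    "match": {
        "versionedExpr": "SRC_IPS_V1",
        "config": {"srcIpRanges": ["*"]},
    },
    "action": "throttle",
    "rateLimitOptions": {
        "rateLimitThreshold": {"count": 100, "intervalSec": 60},
        "conformAction": "allow",
        "exceedAction": "deny(429)",
        "enforceOnKey": "IP",  # each source IP gets its own counter
    },
}

print(json.dumps(rate_limit_rule, indent=2))
```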
If set to false, the headerMatch is considered a match if the preceding match criteria are met. If set to true, the headerMatch is considered a match if the preceding match criteria are NOT met. The default setting is false. IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider's VPN gateway, it cannot be an IP address from Google Compute Engine. An IPv4 internal IP address to assign to the instance for this subinterface. If specified, ip_allocation_mode should be set to ALLOCATE_IP. IP address of the interface inside Google Cloud Platform. Only IPv4 is supported. The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24). The IPv4 address range, in CIDR format, represented by this sub public delegated prefix. The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. The range can be any range listed in the Valid ranges list. The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp) or the IP protocol number. The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number. Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored. IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface. An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access. An IPv6 internal network address for this network interface. IPv6 address of the interface inside Google Cloud Platform. 
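Since several of the descriptions above concern the IP protocol field of firewall rules, here is a minimal sketch of a classic VPC firewall rule body using those protocol/port pairs. The rule name, network, and CIDR range are placeholders:

```python
import json

# Hypothetical ingress firewall rule allowing TCP 22 and 443 plus ICMP
# from a single CIDR range, with per-rule logging enabled.
firewall_rule = {
    "name": "allow-ssh-https",
    "network": "global/networks/default",
    "direction": "INGRESS",
    "sourceRanges": ["203.0.113.0/24"],
    "allowed": [
        {"IPProtocol": "tcp", "ports": ["22", "443"]},
        {"IPProtocol": "icmp"},  # a protocol may also be given as its IP protocol number
    ],
    "logConfig": {"enable": True},
}

print(json.dumps(firewall_rule, indent=2))
```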
Whether the sub prefix is delegated to create Address resources in the delegatee project. Array of key/value pairs. The total size of all keys and values must be less than 512 KB. An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035. The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer's hold time value as the hold time for the BGP connection between the two peers. If set, this value must be between 20 and 60. The default is 20. The Key Exchange Key (KEK). Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project. Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify googleapis.com/reservation-name as the key and specify the name of your reservation as its value. Corresponds to the label key of Node resource. KeyRevocationActionType of the instance. The name of the encryption key that is stored in Google Cloud KMS. For example: "kmsKeyName": "projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. For example: "kmsKeyServiceAccount": "name@project_id.iam.gserviceaccount.com/ Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks. The label key-value pairs that you want to patch onto the instance. Labels to apply to instances that are created from these properties. Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty. Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set. Pairs of IP protocols and ports that the rule should match. Pairs of IP protocols and ports that the rule should match. This field may only be specified when versioned_expr is set to FIREWALL. If set to true, enables Cloud Armor Machine Learning. Any applicable license URI. Integer license codes indicating which licenses are attached to this disk. A list of publicly visible licenses. Reserved for Google's use. URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance. Configuration parameters of autoscaling based on load balancer. Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd. Specifies network locality Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. 
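To illustrate the kmsKeyName, kmsKeyServiceAccount, and label fields just described, here is a hypothetical sketch of a persistent disk creation body that uses a Cloud KMS key instead of a raw customer-supplied key. The project, key ring, and label values are placeholders:

```python
import json

# Hypothetical disk encrypted with a customer-managed key stored in Cloud KMS.
disk_body = {
    "name": "data-disk-1",
    "sizeGb": "100",
    "type": "zones/us-central1-a/diskTypes/pd-ssd",
    "diskEncryptionKey": {
        "kmsKeyName": (
            "projects/my-kms-project/locations/us-central1/"
            "keyRings/my-ring/cryptoKeys/my-key"
        ),
        # If omitted, the Compute Engine default service account is used.
        "kmsKeyServiceAccount": "disk-encrypter@my-project.iam.gserviceaccount.com",
    },
    # Labels can be changed later with the disks.setLabels method.
    "labels": {"team": "storage", "env": "dev"},
}

print(json.dumps(disk_body, indent=2))
```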
An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API. An opaque location hint used to place the instance close to other resources. This field is for use by internal tools that use the public API. Location based rollout policies to apply to the resource. Currently only zone names are supported and must be represented as valid URLs, like: zones/us-central1-a. The value expects an RFC3339 timestamp on or after which the update is considered rolled out to the specified location. Configure logging on this NAT. This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. The log type that this config enables. Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. The machine type to use for instances that are created from these properties. Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance. Specifies the number of hours after VM instance creation where the VM won't be scheduled for maintenance. For more information about maintenance intervals, see Setting maintenance intervals. For more information about maintenance intervals, see Setting maintenance intervals. Maintenance windows that are applied to VMs covered by this policy. A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The following example is a valid match expression for private NAT: "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced. The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule. Specifies how long results of a preflight request can be cached in seconds. This field translates to the Access-Control-Max-Age header. Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is RATE. Defines a target maximum number of simultaneous connections. For usage guidelines, see Connection balancing mode and Utilization balancing mode. 
Not available if the backend's balancingMode is RATE. Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 50%. The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. Maximum value allowed is 100. The maximum number of instances that the autoscaler can scale out to. This is required when creating or updating an autoscaler. The maximum number of replicas must not be lower than the minimum number of replicas. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Maximum number of ports allocated to a VM from this NAT config when Dynamic Port Allocation is enabled. If Dynamic Port Allocation is not enabled, this field has no effect. If Dynamic Port Allocation is enabled, and this field is set, it must be set to a power of two greater than minPortsPerVm, or 64 if minPortsPerVm is not set. If Dynamic Port Allocation is enabled and this field is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. Defines a maximum number of HTTP requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. Defines a maximum target for requests per second (RPS). For usage guidelines, see Rate balancing mode and Utilization balancing mode. Not available if the backend's balancingMode is CONNECTION. The maximum number of parallel requests that are allowed to the backend service. If not specified, there is no limit. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Maximum age of the snapshot that is allowed to be kept. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. Specifies the max run duration for the given instance. If specified, the instance termination action will be performed at the end of the run duration. Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window the autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step. Maximum allowed number (or %) of VMs that can be deducted from the peak recommendation during the window the autoscaler looks at when computing recommendations. Possibly all these VMs can be deleted at once, so the user's service needs to be prepared to lose that many VMs in one step. Specifies the maximum duration (timeout) for streams on the selected route. Unlike the timeout field where the timeout duration starts from the time the request has been fully processed (known as *end-of-stream*), the duration in this field is computed from the beginning of the stream until the response has been processed, including all retries. A stream that does not complete in this duration is closed. If not specified, this field uses the maximum maxStreamDuration value among all backend services associated with the route. This field is only allowed if the URL map is used with backend services with loadBalancingScheme set to INTERNAL_SELF_MANAGED.
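The maxRate*/maxConnections* descriptions above (and the utilization target capacity described just below) are easiest to read against an example. Here is a hypothetical sketch of a backend service body with two backends using different balancing modes; the group URLs are placeholders:

```python
import json

# Hypothetical backend service: one backend capped by requests per second,
# one capped by CPU utilization. A backend's balancingMode determines which
# max* fields apply to it.
backend_service = {
    "name": "web-backend",
    "protocol": "HTTP",
    "loadBalancingScheme": "EXTERNAL",
    "backends": [
        {
            "group": "zones/us-central1-a/instanceGroups/web-a",
            "balancingMode": "RATE",
            "maxRatePerInstance": 100.0,  # target RPS per instance
        },
        {
            "group": "zones/us-central1-b/instanceGroups/web-b",
            "balancingMode": "UTILIZATION",
            "maxUtilization": 0.8,  # valid range [0.0, 1.0]
        },
    ],
}

print(json.dumps(backend_service, indent=2))
```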
The maximum number of instances that can be created above the specified targetSize during the update process. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxSurge is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge. Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. Specifies the maximum allowed TTL for cached content served by this origin. Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTTL seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. Headers sent to the client will not be modified. Setting a TTL of "0" means "always revalidate". The maximum allowed value is 31,622,400s (1 year), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded if necessary. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM. By default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy. The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied: - The instance's status is RUNNING. - If there is a health check on the instance group, the instance's health check status must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. This value can be either a fixed number or, if the group has 10 or more instances, a percentage. If you set a percentage, the number of instances is rounded if necessary. The default value for maxUnavailable is a fixed value equal to the number of zones in which the managed instance group operates. At least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable. Optional parameter to define a target capacity for the UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage guidelines, see Utilization balancing mode. Specifies the principals requesting access for a Cloud Platform resource.
`members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs. The metadata key-value pairs that you want to patch onto the instance. For more information, see Project and instance metadata. The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information. Can only be specified if VPC flow logs for this subnetwork is enabled. Configures whether all, none or a subset of metadata fields should be added to the reported VPC flow logs. Default is EXCLUDE_ALL_METADATA. The configuration for metadata based readiness signal sent by the instance during initialization when stopping / suspending an instance. The Instance Group Manager will wait for a signal that indicates successful initialization before stopping / suspending an instance. If a successful readiness signal is not sent before timeout, the corresponding instance will not be stopped / suspended. Instead, an error will be visible in the lastAttempt.errors field of the managed instance in the listmanagedinstances method. If metadataBasedReadinessSignal.timeoutSec is unset, the Instance Group Manager will directly proceed to suspend / stop instances, skipping initialization on them. Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" was set to CUSTOM_METADATA. Opaque filter criteria used by the load balancer to restrict routing configuration to a limited set of xDS compliant clients. 
In their xDS requests to the load balancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadata filters are specified, all of them need to be satisfied in order to be considered a match. metadataFilters specified here are applied after those specified in the ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to. metadataFilters only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE. This is deprecated and has no effect. Do not use. Minimum CPU platform for the reservation. Minimum CPU platform to be used by instances. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy Bridge". For more information, read Specifying a Minimum CPU Platform. Minimum number of guest CPUs required to use the Instance. Enforced at Instance creation and Instance start. Minimum memory required to use the Instance. Enforced at Instance creation and Instance start. The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. The minimum number of nodes that the group should have. The minimum number of replicas that the autoscaler can scale in to. This cannot be less than 0. If not provided, the autoscaler chooses a default value depending on the maximum number of instances allowed. Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM. Minimum number of seconds to wait after a newly created instance becomes available. This value must be from range [0, 3600]. The minimum interval, in milliseconds, between BFD control packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. The minimum interval, in milliseconds, between BFD control packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If set, this value must be between 1000 and 30000. The default is 1000. Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.
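To tie the minimalAction description above back to the maxSurge and maxUnavailable fields covered earlier, here is a hypothetical sketch of the updatePolicy portion of a managed instance group body. The fixed counts are arbitrary example values:

```python
import json

# Hypothetical rolling-update policy for a managed instance group:
# proactively replace instances, allowing 3 extra instances during the
# update and no unavailable instances.
mig_update_policy = {
    "updatePolicy": {
        "type": "PROACTIVE",
        "minimalAction": "REPLACE",  # RESTART or REPLACE
        "maxSurge": {"fixed": 3},
        "maxUnavailable": {"fixed": 0},
    }
}

print(json.dumps(mig_update_policy, indent=2))
```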
The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node. The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. Defines operating mode for this policy. The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For more information, see Autoscaler modes. The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. Indicates whether or not the disk can be read/write attached to more than one instance. The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 5 and 16. The default is 5. The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. Name of the cookie. The name for an association. Name of the secure tag, created with TagManager's TagValue API. The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails. Name of the version. Unique among all versions in the scope of this managed instance group. A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This is deprecated and has no effect. Do not use. Plugin name. Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long. The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. The name to which this PathMatcher is referred by the HostRule. The name of the sub public delegated prefix. The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. Name of this interface entry. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. URL for the subnetwork resource that will use NAT. Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035. The name for an association. Header name. Service Directory namespace to register the forwarding rule under. Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance. Specify the NatIpAllocateOption, which can take one of the following values: - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty. A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project. Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and override any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. When the cache mode is set to CACHE_ALL_STATIC or USE_ORIGIN_HEADERS, negative caching applies to responses with the specified response code that lack any Cache-Control, Expires, or Pragma: no-cache directives. 
When the cache mode is set to FORCE_CACHE_ALL, negative caching applies to all responses with the specified response code, and overrides any caching headers. By default, Cloud CDN will apply the following default TTLs to these status codes: HTTP 300 (Multiple Choices), 301, 308 (Permanent Redirects): 10m; HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s; HTTP 405 (Method Not Allowed), 421 (Misdirected Request), 501 (Not Implemented): 60s. These defaults can be overridden in negative_caching_policy. Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. Sets a cache TTL for the specified HTTP status code. negative_caching must be enabled to configure negative_caching_policy. Omitting the policy and leaving negative_caching enabled will use Cloud CDN's default cache TTLs. Note that when specifying an explicit negative_caching_policy, you should take care to specify a cache TTL for all response codes that you wish to cache. Cloud CDN will not apply any default negative caching when a policy exists. URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/network - projects/project/global/networks/network - global/networks/default An array of network access configurations for this interface. An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system. Note that for MachineImage, this is not supported yet. This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP. The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. A set of node affinity and anti-affinity configurations. Refer to Configuring node affinity for more information. Overrides reservationAffinity. This is deprecated and has no effect. Do not use. Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. The number of vNUMA nodes. OAuth2 client ID to use for the authentication flow. [Input Only] OAuth client info required to generate client id to be used for IAP. OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field.
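To make the negative-caching fields above concrete, here is a minimal sketch of what an explicit policy might look like in a request body. The negativeCaching, negativeCachingPolicy, code, and ttl names follow the descriptions above; the enclosing cdnPolicy wrapper and the Python representation are assumptions for illustration only.

import json

# Hypothetical backend-service CDN settings. With an explicit policy, Cloud CDN
# applies no default negative caching, so every status code to cache must be listed.
cdn_policy = {
    "negativeCaching": True,
    "negativeCachingPolicy": [
        {"code": 404, "ttl": 120},  # cache 404s for 120 seconds
        {"code": 301, "ttl": 600},  # cache permanent redirects for 10 minutes
    ],
}
print(json.dumps({"cdnPolicy": cdn_policy}, indent=2))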
[Input Only] An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it. If you have configured an application-based health check for the group, this field controls whether to trigger VM autohealing based on a failed health check. Valid values are: - ON (default): The group recreates running VMs that fail the application-based health check. - OFF: When set to OFF, you can still observe instance health state, but the group does not recreate VMs that fail the application-based health check. This is useful for troubleshooting and setting up your health check configuration. Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options. Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. Specifies which action to take on instance update with this disk. Default is to use the existing disk. This is deprecated and has no effect. Do not use. Defines the operation of node selection. Valid operators are IN for affinity and NOT_IN for anti-affinity. The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO. Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values. Path to set for the cookie. Path portion of the URL. The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion. Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters. The path that is used in the redirect response instead of the one that was supplied in the request. pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list. Within a given pathMatcher, only one of pathRules or routeRules must be set. The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. Optional. If specified, this field is used to populate the "name" field in gRPC requests. Peer BGP Autonomous System Number (ASN).
Each BGP interface may use a different value. IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported. IPv6 address of the BGP interface outside Google Cloud Platform. Specifies a non-zero timeout per retry attempt. If not specified, the timeout set in the HttpRouteAction field is used. If the timeout in the HttpRouteAction field is not set, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%. The percentage of traffic for connections, operations, or requests that is aborted as part of fault injection. The value must be from 0.0 to 100.0 inclusive. The percentage of traffic for connections, operations, or requests for which a delay is introduced as part of fault injection. The value must be from 0.0 to 100.0 inclusive. This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use. The Platform Key (PK). The platform of the backend target(s) of this NEG. Possible values include: 1. API Gateway: apigateway.googleapis.com 2. App Engine: appengine.googleapis.com 3. Cloud Functions: cloudfunctions.googleapis.com 4. Cloud Run: run.googleapis.com The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. The port number, which can be a value between 1 and 65535. The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535. The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535. The UDP port number for the health check request. Valid values are 1 through 65535. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.
If not specified, gRPC health check follows the behavior specified in the port and portName fields. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows the behavior specified in the port and portName fields. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows the behavior specified in the port and portName fields. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows the behavior specified in the port and portName fields. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows the behavior specified in the port and portName fields. Specifies how the port is selected for health checking. It can be one of the following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows the behavior specified in the port and portName fields. URL of the Partner's portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values. An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"].
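As a concrete illustration of the portSpecification variants above, a minimal health-check request body might look like the following sketch. The type, tcpHealthCheck, port, and portSpecification names follow these descriptions; the exact enclosing resource shape and the check name are assumptions.

# Hypothetical TCP health check pinned to a fixed port.
health_check = {
    "name": "example-tcp-hc",                   # hypothetical name
    "type": "TCP",
    "tcpHealthCheck": {
        "portSpecification": "USE_FIXED_PORT",  # probe the port given below
        "port": 8080,                           # valid values are 1 through 65535
    },
}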
An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. An optional list of ports to which this rule applies. This field is only applicable for the UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. This field may only be specified when versioned_expr is set to FIREWALL. PostKeyRevocationActionType of the instance. A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK will be generated for each key in the key chain. The Google router will automatically pick the key with the most recent startTime when establishing or re-establishing a MACsec secure link. Indicates whether predictive autoscaling based on the CPU metric is enabled. Valid values are: * NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. * OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. The value of the header must start with the contents of prefixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request. prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request is used for the redirect. The value must be from 1 to 1024 characters. A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not. Only one of presentMatch, exactMatch, or regexMatch must be set. If set to true, the specified action is not enforced. An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.
For routeRules within a given pathMatcher, priority determines the order in which a load balancer interprets routeRules. RouteRules are evaluated in order of priority, from the lowest to highest number. The priority of a rule decreases as its number increases (1, 2, 3, N+1). The first rule that matches the request is applied. You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number from 0 to 2147483647 inclusive. Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules. An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance, such as a Next Gen Firewall, a Virtual Router, or an SD-WAN VM. The private IPv6 Google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default. Note that for MachineImage, this is not supported yet. A write-only private key in PEM format. Only insert requests will include this field. The path to the file holding the client or server private key. The project id or number for the project to set the limit for. A map of project id and project config. This is only valid when share_type's value is SPECIFIC_PROJECTS. A list of Project names to specify consumer projects for this shared-reservation. This is only valid when share_type's value is SPECIFIC_PROJECTS. Properties for instances that are created using this instances config. You can add or modify properties using the instanceGroupManagers.patch or regionInstanceGroupManagers.patch. After setting instances_config, you must update your instances to use it; for example, you can use the applyUpdatesToInstances method. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the Extreme persistent disk documentation. Specifies the provisioning model of the instance. Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE. Configures the mechanism to obtain security certificates and identity information. The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be created for the first IP in the associated external IPv6 range.
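The routeRules priority ordering described above (lowest number evaluated first, gaps allowed) can be pictured with a small, self-contained sketch; the rule contents here are hypothetical.

# Rules are evaluated from the lowest priority number to the highest;
# gaps (1, 4, 9) leave room to insert rules later without renumbering.
route_rules = [
    {"priority": 9, "description": "fallback"},
    {"priority": 1, "description": "exact API match"},
    {"priority": 4, "description": "static assets"},
]
for rule in sorted(route_rules, key=lambda r: r["priority"]):
    print(rule["priority"], rule["description"])  # prints 1, 4, 9 in order; first match wins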
Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request. Not supported when the URL map is bound to a target gRPC proxy. Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. Names of query string parameters to include in cache keys. All other parameters will be excluded. '&' and '=' will be percent encoded and not treated as delimiters. Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters. The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified by the users. The IP range to advertise. The value must be a CIDR-formatted string. The end of the range (exclusive) in signed long integer format. The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer or is empty, the match fails. For example, for a range [-5, 0]: -3 will match; 0 will not match; 0.25 will not match; -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL. The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork. The start of the range (inclusive) in signed long integer format. Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. Threshold at which to begin rate limiting. Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rawKey": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" Parameters defining the redirect action. Cannot be specified for any other actions. The HTTP Status code to use for this RedirectAction. Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER, which corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method is retained. - PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method is retained. An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This must be specified for redirect actions. Cannot be specified for any other actions. Name of the interface that will be redundant with the current interface you are creating. The redundantInterface must belong to the same Cloud Router as the interface here. To establish the BGP session to a Router Appliance VM, you must create two BGP peers.
The two BGP peers must be attached to two separate interfaces that are redundant with each other. The redundant_interface must be 1-63 characters long, and comply with RFC1035. Specifically, the redundant_interface must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED. If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. The default value is false. The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource. What action should be used to replace instances. See minimal_action.REPLACE. URLs of the zones where the disk should be replicated to. Only applicable for regional resources. The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII. Raw data of the request to send in the payload of the UDP packet. It is an error if this is empty. The request data can only be ASCII. If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. Headers to add to a matching request before forwarding the request to the backendService. The list of request headers to add or overwrite if they're already present. A list of header names for headers that need to be removed from the request before forwarding the request to the backendService.
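The header append-versus-replace semantics just described can be made concrete with a small sketch of a header action. The requestHeadersToAdd, headerName, headerValue, replace, and requestHeadersToRemove names follow these descriptions; the header names and the enclosing structure are hypothetical.

header_action = {
    "requestHeadersToAdd": [
        {
            "headerName": "X-Env",  # hypothetical header
            "headerValue": "prod",
            "replace": True,        # True: discard existing values; False (default): append
        }
    ],
    "requestHeadersToRemove": ["X-Debug"],  # stripped before reaching the backendService
}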
Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. The request path of the HTTP/2 health check request. The default value is /. The request path of the HTTP health check request. The default value is /. The request path of the HTTPS health check request. The default value is /. Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, it must be in the range of 600 to 3600 seconds. Nanos are disallowed. Specifies the reservations that instances can consume from. Note that for MachineImage, this is not supported yet. The URL of the reserved internal range. The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. The resource identified by this value is platform-specific and is as follows: 1. API Gateway: The gateway ID 2. App Engine: The service name 3. Cloud Functions: The function name 4. Cloud Run: The service name Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. Resource manager tags to be bound to the instance. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT & PATCH) when empty. Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name. Resource policies (names, not URLs) applied to instances created from these properties. Note that for MachineImage, this is not supported yet. The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII. The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII. The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII. Headers to add to the response before sending the response back to the client. A list of header names for headers that need to be removed from the response before sending the response back to the client. Retention policy applied to snapshots created by this resource policy.
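Here is a minimal sketch of the request-mirroring policy described above; the backend service URL is hypothetical, and the enclosing routeAction structure is an assumption.

route_action = {
    "requestMirrorPolicy": {
        # Responses from the mirrored service are ignored by the load balancer;
        # the -shadow host suffix described above is added automatically.
        "backendService": "projects/example/global/backendServices/mirror-bs"
    }
}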
Specifies one or more conditions when this retry policy applies. Valid values are: - 5xx: retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. - connect-failure: a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. - retriable-4xx: a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. - refused-stream: a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. - cancelled: a retry is attempted if the gRPC status code in the response header is set to cancelled. - deadline-exceeded: a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. - internal: a retry is attempted if the gRPC status code in the response header is set to internal. - resource-exhausted: a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. - unavailable: a retry is attempted if the gRPC status code in the response header is set to unavailable. Only the following codes are supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true: - cancelled - deadline-exceeded - internal - resource-exhausted - unavailable How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number. Specifies the retry policy associated with this route. Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction. In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction. The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number. Within a given pathMatcher, you can set only one of pathRules or routeRules. URI of the VM instance that is used as a third-party router appliance, such as a Next Gen Firewall, a Virtual Router, or a Router Appliance.
The VM instance must be located in zones contained in the same region as this Cloud Router. The VM instance is the peer side of the BGP session. The network-wide routing mode to use. If set to REGIONAL, this network's Cloud Routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's Cloud Routers will advertise routes with all subnets of this network, across regions. Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource. You can provide either the rawKey or the rsaEncryptedKey. For example: "rsaEncryptedKey": "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFHz0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoDD6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" The key must meet the following requirements before you can provide it to Compute Engine: 1. The key is wrapped using an RSA public key certificate provided by Google. 2. After being wrapped, the key must be encoded in RFC 4648 base64 encoding. You can get the RSA public key certificate provided by Google at: https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem An integer uniquely identifying a rule in the list. The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. Identifier for the rule. This is only unique within the given security policy. This can only be set during rule creation; if a rule number is not specified, it will be generated by the server. Rule visibility can be one of the following: STANDARD - opaque rules (default). PREMIUM - transparent rules. A list of rules associated with this NAT. This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0. For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request (see the discard_local_ssd option on Stop/Suspend). Read-only in the API. Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler, and they can overlap. During overlapping periods the greatest min_required_replicas of all scaling schedules is applied. Up to 128 scaling schedules are allowed. Specifies the frequency for the operation, using the unix-cron format. A VM Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy. Specifies the scheduling options for the instances that are created from these properties. Scope specifies the availability domain across which the VMs should be spread. The list of scopes to be made available for this service account. Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS. A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if "LIST_OF_SECONDARY_IP_RANGES" is one of the values in source_ip_ranges_to_nat.
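Tying the last two descriptions together, a sketch of one subnetwork entry in a Router NAT configuration might look as follows. The field names mirror these descriptions; the subnetwork URL and range name are hypothetical.

nat_subnetwork = {
    "name": "projects/example/regions/us-central1/subnetworks/example-subnet",
    "sourceIpRangesToNat": ["LIST_OF_SECONDARY_IP_RANGES"],
    "secondaryIpRangeNames": ["pods-range"],  # secondary ranges allowed to use NAT
}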
Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years. [Input Only] Secure tags to apply to this instance. Maximum number of secure tags allowed is 50. Note that for MachineImage, this is not supported yet. Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. This setting defines the default "max-stale" duration for any cached responses that do not specify a max-stale directive. Stale responses that exceed the TTL configured here will not be served. The default limit (max-stale) is 86400s (1 day), which will allow stale content to be served up to this limit beyond the max-age (or s-max-age) of a cached response. The maximum allowed value is 604800 (1 week). Set this to zero (0) to disable serve-while-stale. Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services. Service Directory service to register the forwarding rule under. The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: "default", "my-service". Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: "run-service". The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. Expected BackendService or BackendBucket resource the given URL should be mapped to. The service field cannot be set if expectedRedirectResponseCode is set. A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.
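As a sketch of how the serve-while-stale setting described above might be expressed, assuming a serveWhileStale field (in seconds) inside a backend-service cdnPolicy body:

cdn_policy = {
    # Allow stale responses for up to one day beyond max-age / s-max-age;
    # 0 would disable serve-while-stale entirely.
    "serveWhileStale": 86400,
}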
[Optional] Service Directory region to register this global forwarding rule under. Defaults to "us-central1". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region. The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE. Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration. Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has an external IPv6 range associated. Share-settings for shared-reservation. Type of sharing for this shared-reservation. Note that for MachineImage, this is not supported yet. Specifies the Shielded VM options for the instances that are created from these properties. Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a "Cache-Control: public, max-age=[TTL]" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered. If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler keeps the number of instances proportional to the value of this metric. The metric itself does not change value due to group resizing. A good metric to use with the target is, for example, pubsub.googleapis.com/subscription/num_undelivered_messages, or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance; such a metric would be better used with utilization_target instead. The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000. Properties with which snapshots are created, such as labels and encryption keys. Specifies a valid partial or full URL to an existing Persistent Disk resource.
When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. If desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks. Note that for InstanceTemplate, specify the disk name for zonal disk, and the URL for regional disk. The full Google Cloud Storage URL where the raw disk image archive is stored. The following are valid formats for the URL: - https://storage.googleapis.com/bucket_name/image_archive_name - https://storage.googleapis.com/bucket_name/folder_name/image_archive_name In order to create an image, you must provide the full or partial URL of one of the following: - The rawDisk.source URL - The sourceDisk URL - The sourceImage URL - The sourceSnapshot URL Specifies a URL of the disk attached to the source instance. URL of the disk attached to the source instance. This can be a full or valid partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or initializeParams.sourceSnapshot or disks.source is required except for local SSD. To create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image: projects/debian-cloud/global/images/family/debian-9 Alternatively, use a specific version of a public operating system image: projects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD To create a disk with a custom image that you created, specify the image name in the following format: global/images/my-custom-image You can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name: global/images/family/my-image-family If the source image is deleted later, this field will not be set. The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys. The source instant-snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot, initializeParams.sourceInstantSnapshot, initializeParams.sourceImage, or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: us-central1-a/instantSnapshots/my-backup If the source instant-snapshot is deleted later, this field will not be set. Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"]. Default: [ALL_IP_RANGES] A list of URLs of the IP resources used for this NAT rule. These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. A list of URLs of the subnetworks used as source ranges for this NAT Rule. These subnetworks must have purpose set to PRIVATE_NAT. This field is used for private NAT.
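A brief sketch of the image-family form described above, as it might appear in a boot-disk initializeParams block; the disk size and the overall structure are assumptions.

boot_disk = {
    "boot": True,
    "initializeParams": {
        # A family reference resolves to the latest image in that family:
        "sourceImage": "projects/debian-cloud/global/images/family/debian-9",
        "diskSizeGb": "10",  # hypothetical size
    },
}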
A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. A list of URLs of subnetworks representing source ranges to be drained. This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. This field is used for private NAT. The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set. The customer-supplied encryption key of the source snapshot. Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks is allowed to Nat (specified in the field subnetwork below). The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region. Reservation for instances with specific machine shapes. Indicates whether the reservation can be consumed by VMs with affinity for "any" reservation. If the field is set, then only VMs that target the reservation by name can consume from this reservation. Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 1000. CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000. CIDR IP address range. Maximum number of src_ip_ranges allowed is 10. Region codes whose IP addresses will be used to match for source of traffic. Should be specified as a 2-letter country code as defined in the ISO 3166 alpha-2 country codes, e.g. "US". Maximum number of source region codes allowed is 5000. List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256. The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be set both at instance creation and in update network interface operations. Start time of the Future Reservation. The start_time is an RFC3339 string. An RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart. Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. Start time of the window.
This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. The start time of the schedule. The timestamp is an RFC3339 string. Time within the window to start the operations. It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error. The rollout policy for this deprecation. This policy is only enforced by image family views. The rollout policy restricts the zones where the associated resource is considered in a deprecated state. When the rollout policy does not include the user specified zone, or if the zone is rolled out, the associated resource is considered in a deprecated state. The rollout policy for this deprecation is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method. Cloud Storage bucket storage location of the auto snapshot (regional or multi-regional). If set to true, any accompanying query portion of the original URL is removed before redirecting the request. If set to false, the query portion of the original URL is retained. The default is set to false. A text proto that conforms to a Struct type definition interpreted by the plugin. Specifies the instances-to-hosts placement relationship. SubInterfaces help enable L2 communication for the instance over subnetworks that support L2. Every network interface will get a default untagged (vlan not specified) subinterface. Users can specify additional tagged subinterfaces which are sub-fields to the Network Interface. Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with a server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact. A list of alternate names to verify the subject identity in the certificate presented by the client. If specified, this subnetwork must belong to the same network as that of the network interface. If not specified, the subnet of the network interface will be used. If you specify this property, you can specify the subnetwork as a full or partial URL.
For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - regions/region/subnetworks/subnetwork The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not specify this field. If the network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. If you specify this field, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - regions/region/subnetworks/subnetwork The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. When you establish a BGP session to a VM instance using this interface, the VM instance must belong to the same subnetwork as the subnetwork specified here. The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used. A set of subnetworks for which traffic from/to all VM instances will be mirrored. They must live in the same region as this packetMirroring. You may specify a maximum of 5 subnetworks. A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above. The number of backends per backend group assigned to each proxy instance or each service mesh client. An input parameter to the `CONSISTENT_HASH_SUBSETTING` algorithm. Can only be set if `policy` is set to `CONSISTENT_HASH_SUBSETTING`. Can only be set if load balancing scheme is `INTERNAL_MANAGED` or `INTERNAL_SELF_MANAGED`. `subset_size` is optional for Internal HTTP(S) load balancing and required for Traffic Director. If you do not provide this value, Cloud Load Balancing will calculate it dynamically to optimize the number of proxies/clients visible to each backend and vice versa. Must be greater than 0. If `subset_size` is larger than the number of backends/endpoints, then subsetting is disabled. The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5. The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100. This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900. The value of the header must end with the contents of suffixMatch. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. This is deprecated and has no effect. Do not use. This is deprecated and has no effect. Do not use.
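A quick worked example of the ejection-threshold formula above, remembering that the factor is stored scaled by one thousand; the sample rates are made up.

mean_success_rate = 0.98
stdev = 0.02
success_rate_stdev_factor = 1900  # runtime value for a desired factor of 1.9
threshold = mean_success_rate - (stdev * success_rate_stdev_factor / 1000)
print(threshold)  # 0.98 - 0.038 = 0.942; hosts below this success rate are ejected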
Optional Cloud Run tag represents the "named-revision" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: "revision-0010".
A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035.
A set of mirrored tags. Traffic from/to all VM instances that have one or more of these tags will be mirrored.
Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.
A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.
A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule. This field may only be specified when versioned_expr is set to FIREWALL.
A list of secure tags that controls which instances the firewall rule applies to. If targetSecureTag is specified, then the firewall rule applies only to instances in the VPC network that have one of those EFFECTIVE secure tags; if all the target_secure_tag values are in an INEFFECTIVE state, this rule is ignored. targetSecureTag may not be set at the same time as targetServiceAccounts. If neither targetServiceAccounts nor targetSecureTag is specified, the firewall rule applies to all instances on the specified network. The maximum number of target secure tags allowed is 256.
A list of service accounts indicating the sets of instances to which this rule is applied.
A list of service accounts indicating the sets of instances to which this rule is applied.
The distribution shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).
Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to:
- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used.
- If expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize). If there is a remainder, the number is rounded.
If unset, this version will update any remaining instances not updated by another version. Read "Starting a canary update" for more information.
The target URI of the SDS server.
Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set.
Timeout (in seconds) for TCP connections that are in TIME_WAIT state. Defaults to 120s if not set.
Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set.
Specifies the timestamp, when the instance will be terminated, in RFC3339 text format. If specified, the instance termination action will be performed at the termination time.
The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above.
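The targetSize arithmetic described above reduces to a few lines. This sketch is a hedged illustration; the function name is invented, and exactly one of fixed/percent is assumed to be set.

def version_target_size(group_target_size, fixed=None, percent=None):
    # Hypothetical helper mirroring the documented rules.
    if fixed is not None:
        return min(fixed, group_target_size)
    if percent is not None:
        return round(percent / 100 * group_target_size)  # rounded if there is a remainder
    return group_target_size  # unset: the version updates the remaining instances

print(version_target_size(10, fixed=25))    # 10
print(version_target_size(10, percent=37))  # 4 (3.7 rounds)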
How far back autoscaling looks when computing recommendations to include directives regarding slower scale in, as described above.
Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as *end-of-stream*) up until the response has been processed. Timeout includes all retries. If not specified, this field uses the largest timeout among all backend services associated with the route. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.
The number of seconds to wait for a readiness signal during initialization before timing out.
Optional. Title for the expression, i.e. a short string describing its purpose. This can be used, e.g., in UIs which allow entering the expression.
Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following:
- SIMPLE: Secure connections with standard TLS semantics.
- MUTUAL: Secure connections to the backends using mutual TLS by presenting client certificates for authentication.
Total number of instances for which capacity assurance is requested at a future time period.
Specifies the key used for connection tracking. There are two options:
- PER_CONNECTION: This is the default mode. The Connection Tracking is performed as per the Connection Key (default Hash Method) for the specific protocol.
- PER_SESSION: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity.
For more details, see [Tracking Mode for Network Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-backend-service#tracking-mode) and [Tracking Mode for Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal#tracking-mode).
The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
Lifetime of the cookie.
The type of configuration. The default and only option is ONE_TO_ONE_NAT.
Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.
The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: VIRTIO_SCSI_MULTIQUEUE, WINDOWS, MULTI_IP_SUBNET, UEFI_COMPATIBLE, SECURE_BOOT, GVNIC, SEV_CAPABLE, SUSPEND_RESUME_COMPATIBLE. For more information, see Enabling guest operating system features.
The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions, or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).
Type of resource for which this commitment applies. Possible values are VCPU and MEMORY.
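For the time-zone field above, the wall-clock schedule is interpreted in the named tz-database zone. A minimal sketch with Python's zoneinfo (3.9+); the zone and schedule values are invented for illustration.

from datetime import datetime
from zoneinfo import ZoneInfo

schedule_time_zone = "America/New_York"  # must be a tz database name
local = datetime(2024, 3, 1, 8, 0, tzinfo=ZoneInfo(schedule_time_zone))
print(local.astimezone(ZoneInfo("UTC")))  # 2024-03-01 13:00:00+00:00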
Indicates whether this NAT is used for public or private IP translation. If unspecified, it defaults to PUBLIC.
Type of the redirect action.
Timeout (in seconds) for UDP connections. Defaults to 30s if not set.
Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.
Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.
Resource URL to the forwarding rule representing the ILB configured as destination of the mirrored traffic.
Resource URL to the virtual machine instance which is being mirrored.
Resource URL to the subnetwork for which traffic from/to all VM instances will be mirrored.
URL of the network resource.
A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs "foo1-dot-appname.appspot.com/v1" and "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with URL mask "<service>-dot-appname.appspot.com/<version>". The URL mask will parse them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively.
A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" can be backed by the same Serverless NEG with URL mask "/<function>". The URL mask will parse them to { function = "function1" } and { function = "function2" } respectively.
A template to parse service and tag fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask "<tag>.domain.com/<service>". The URL mask will parse them to { service = "bar1", tag = "foo1" } and { service = "bar2", tag = "foo1" } respectively.
A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. The fields parsed by this template are platform-specific and are as follows: 1. API Gateway: The gateway ID 2. App Engine: The service and version 3. Cloud Functions: The function name 4. Cloud Run: The service and tag
When this rule is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy.
When a path pattern is matched, the request is redirected to a URL specified by urlRedirect. If urlRedirect is specified, service or routeAction must not be set. Not supported when the URL map is bound to a target gRPC proxy.
The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.
The target CPU utilization that the autoscaler maintains. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales in the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales out until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
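A hedged sketch of the CPU-target behaviour just described: the usual proportional rule, clamped to the configured bounds. This is an approximation for illustration, not the autoscaler's exact implementation.

import math

def recommended_size(current_instances, avg_cpu, target=0.6, min_instances=1, max_instances=10):
    # Move the instance count toward the level that brings average CPU back to target.
    ideal = math.ceil(current_instances * avg_cpu / target)
    return max(min_instances, min(max_instances, ideal))

print(recommended_size(4, avg_cpu=0.9))  # 6: scales out toward the target
print(recommended_size(4, avg_cpu=0.3))  # 2: scales in toward the target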
The target value of the metric that autoscaler maintains. This must be a positive value. A utilization metric scales the number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler works to keep this value constant for each of the instances.
Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that the autoscaler maintains. Must be a positive float value. If not defined, the default is 0.8.
Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE.
Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate.
Defines how TLS certificates are obtained.
This is deprecated and has no effect. Do not use.
The value of the label must match the specified value. value can have a maximum length of 1024 characters.
Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).
Header value.
This is deprecated and has no effect. Do not use.
Corresponds to the label values of a reservation resource. This can be either a name of a reservation in the same project or "projects/different-project/reservations/some-reservation-name" to target a shared reservation in the same zone but in a different project.
Corresponds to the label values of a Node resource.
Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example values: "v1", "v2".
The optional resource version. The version identified by this value is platform-specific and is as follows: 1. API Gateway: Unused 2. App Engine: The service version 3. Cloud Functions: Unused 4. Cloud Run: The service tag
Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.
The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.
VLAN tag. Should match the VLAN(s) supported by the subnetwork to which this subinterface is connecting.
Number of VMs in this placement group.
Specifies the schedule for starting instances.
Specifies the schedule for stopping instances.
Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction). The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. The value must be from 0 to 1000.
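A minimal sketch of the weight arithmetic above: each service receives weight / (sum of all weights) of new traffic, and session affinity then pins later requests. The service names and weights are invented for illustration.

import random

weighted_services = {"backend-a": 700, "backend-b": 300}  # weights in [0, 1000]

def traffic_fraction(name):
    return weighted_services[name] / sum(weighted_services.values())

def pick_backend_for_new_session():
    # Weighted random choice models how new traffic splits by weight.
    names = list(weighted_services)
    return random.choices(names, weights=[weighted_services[n] for n in names])[0]

print(traffic_fraction("backend-a"))  # 0.7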
Weight report mode. Used for weighted Load Balancing.
Weight report mode. Used for weighted Load Balancing.
Weight report mode. Used for weighted Load Balancing.
A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.
The URL of the zone. The zone must exist in the region where the managed instance group is located.
Zone in which the reservation resides. A zone must be provided if the reservation is created within a commitment.
Zones where the regional managed instance group will create and manage its instances.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! ***
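Closing out this record, here is a toy parser for the URL-mask templates described earlier (e.g. "<tag>.domain.com/<service>"). It is a regex sketch for illustration on Python 3.7+ (where re.escape leaves '<' and '>' unescaped), not the actual Cloud parsing code.

import re

def parse_url_mask(mask, url):
    # Turn each <name> placeholder into a named capture group.
    pattern = re.sub(r"<(\w+)>", r"(?P<\1>[^./]+)", re.escape(mask))
    m = re.search(pattern, url)
    return m.groupdict() if m else None

print(parse_url_mask("<tag>.domain.com/<service>", "foo1.domain.com/bar1"))
# {'tag': 'foo1', 'service': 'bar1'}
print(parse_url_mask("/<function>", "mydomain.com/function2"))
# {'function': 'function2'}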
395,990
en
0.797074
import sys
import matplotlib.pyplot as plt
import os

root_path = os.path.dirname(os.path.abspath('__file__'))
sys.path.append(root_path)
from tools.models import one_step_esvr, one_step_esvr_multi_seed
from Xianyang_dwt.projects.variables import variables

if __name__ == '__main__':
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_traindev_test',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_train_val',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_traindev_append',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    for leading_time in [1, 3, 5, 7, 9]:
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_' + str(leading_time) + '_ahead_forecast_pacf',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
    for leading_time in [1, 3, 5, 7, 9]:
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_' + str(leading_time) + '_ahead_forecast_pcc_local',  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pca28',  # +str(i),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pcamle',  # +str(i),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
        n_calls=100,
    )
    num_in_one = sum(variables['lags_dict']['db10-2'].values())
    for n_components in range(num_in_one - 16, num_in_one + 1):
        one_step_esvr_multi_seed(
            root_path=root_path,
            station='Xianyang',
            decomposer='dwt',
            predict_pattern='one_step_1_ahead_forecast_pacf_pca' + str(n_components),  # hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
            n_calls=100,
        )
Xianyang_dwt/projects/esvr_one_step.py
2,889
hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle+str(i), hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle+str(i), hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
583
en
0.560142
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test operator_utils."""

from mindquantum.core.operators import (
    FermionOperator,
    QubitExcitationOperator,
    QubitOperator,
)
from mindquantum.core.operators.utils import (
    commutator,
    count_qubits,
    down_index,
    hermitian_conjugated,
    normal_ordered,
    number_operator,
    up_index,
)


def test_count_qubits():
    """Test count_qubits"""
    qubit_op = QubitOperator("X1 Y2")
    assert count_qubits(qubit_op) == 3

    fer_op = FermionOperator("1^")
    assert count_qubits(fer_op) == 2

    qubit_exc_op = QubitExcitationOperator("4^ 1")
    assert count_qubits(qubit_exc_op) == 5


def test_normal_ordered():
    """Test normal_ordered function"""
    op = FermionOperator("3 4^")
    assert str(normal_ordered(op)) == '-1 [4^ 3] '


def test_commutator():
    """Test commutator"""
    qub_op1 = QubitOperator("X1 Y2")
    qub_op2 = QubitOperator("X1 Z2")
    qub_op3 = 2j * QubitOperator("X2")
    assert commutator(qub_op1, qub_op2) == qub_op3
    assert commutator(qub_op1, qub_op1) == QubitOperator()

    qubit_exc_op1 = QubitExcitationOperator(((4, 1), (1, 0)), 2.0j)
    qubit_exc_op2 = QubitExcitationOperator(((3, 1), (2, 0)), 2.0j)
    qubit_exc_op3 = QubitExcitationOperator("3^ 2 4^ 1", 4.0) + QubitExcitationOperator("4^ 1 3^ 2", -4.0)
    assert commutator(qubit_exc_op1, qubit_exc_op2).compress() == qubit_exc_op3
    assert commutator(qubit_exc_op1, qubit_exc_op1) == QubitExcitationOperator()


def test_number_operator():
    """Test number operator"""
    nmode = 3
    # other parameters by default
    check_str = '1 [0^ 0] +\n1 [1^ 1] +\n1 [2^ 2] '
    assert str(number_operator(nmode)) == check_str

    check_str2 = '1 [3^ 3] '
    assert str(number_operator(None, nmode)) == check_str2


def test_up_index():
    """This is for labelling the spin-orbital index with spin alpha"""
    alpha = 2
    assert up_index(alpha) == 4


def test_down_index():
    """This is for labelling the spin-orbital index with spin beta"""
    beta = 1
    assert down_index(beta) == 3


def test_hermitian_conjugated():
    """Test hermitian_conjugated for the QubitOperator and Fermion Operator"""
    qub_op1 = -1j * QubitOperator("X1 Y2") + QubitOperator("X1")
    qub_op2 = 1j * QubitOperator("X1 Y2") + QubitOperator("X1")
    assert hermitian_conjugated(qub_op1) == qub_op2

    fer_op1 = FermionOperator("1^ 2")
    fer_op2 = FermionOperator("2^ 1")
    assert hermitian_conjugated(fer_op1) == fer_op2

    qubit_exc_op1 = QubitExcitationOperator(((4, 1), (1, 0)), 2.0j).normal_ordered()
    qubit_exc_op2 = QubitExcitationOperator(((4, 0), (1, 1)), -2.0j).normal_ordered()
    assert hermitian_conjugated(qubit_exc_op1) == qubit_exc_op2
tests/st/test_core/test_operators/test_operators_utils.py
3,388
Test commutator Test count_qubits This is for labelling the spin-orbital index with spin beta Test hermitian_conjugated for the QubitOperator and Fermion Operator Test normal_ordered function Test number operator This is for labelling the spin-orbital index with spin alpha Test operator_utils. -*- coding: utf-8 -*- Copyright 2021 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ other parameters by default
985
en
0.770826
import os
import soft_renderer.functional as srf
import torch, random
import numpy as np
import tqdm
from haven import haven_utils as hu
from PIL import Image, ImageOps, ImageFilter
import torchvision.transforms as transforms

class_ids_map = {
    '02691156': 'Airplane',
    '02828884': 'Bench',
    '02933112': 'Cabinet',
    '02958343': 'Car',
    '03001627': 'Chair',
    '03211117': 'Display',
    '03636649': 'Lamp',
    '03691459': 'Loudspeaker',
    '04090263': 'Rifle',
    '04256520': 'Sofa',
    '04379243': 'Table',
    '04401088': 'Telephone',
    '04530566': 'Watercraft',
}
CLASS_IDS = sorted(list(class_ids_map.keys()))


class ShapeNet(object):
    def __init__(self, directory=None, split=None, exp_dict=None):
        self.class_ids = CLASS_IDS
        n_classes = exp_dict.get('n_classes')
        if n_classes:
            self.class_ids = CLASS_IDS[:n_classes]
        classes = exp_dict.get('classes')
        if classes:
            classes_map = {key: value for (value, key) in class_ids_map.items()}
            self.class_ids = sorted([classes_map[k] for k in classes])
        self.split = split
        self.elevation = 30.
        self.distance = 2.732
        self.exp_dict = exp_dict
        self.class_ids_map = class_ids_map

        self.images = []
        self.voxels = []
        self.labels = []
        self.class_ids_pair = list(zip(self.class_ids, [self.class_ids_map[i] for i in self.class_ids]))
        self.num_data = {}
        self.pos = {}
        count = 0
        # ind2class = {key: value for (value, key) in enumerate(self.class_ids)}
        loop = tqdm.tqdm(self.class_ids)
        loop.set_description(f'Loading {split} Dataset')
        n_train_objects = exp_dict.get('n_train_objects')
        n_ratio_val = exp_dict.get('n_val_ratio')
        # assert n_ratio_val is not None
        if n_train_objects is None and split == 'unlabeled':
            return
        if split in ['train', 'unlabeled']:
            set_name = 'train'
        elif split in ['val', 'test']:
            set_name = 'val'
            if n_ratio_val is None:
                set_name = split
        for ci, class_id in enumerate(loop):
            i = list(np.load(os.path.join(directory, '%s_%s_images.npz' % (class_id, set_name))).items())[0][1]
            v = list(np.load(os.path.join(directory, '%s_%s_voxels.npz' % (class_id, set_name))).items())[0][1]
            # train: keep only the first n objects
            if split == 'train' and n_train_objects is not None:
                n = n_train_objects
                i = i[:n]
                v = v[:n]
            # unlabeled: keep everything after the first n objects
            if split == 'unlabeled' and n_train_objects is not None:
                n = n_train_objects
                i = i[n:]
                v = v[n:]
            elif split == 'val' and n_ratio_val is not None:
                n = int(i.shape[0] * n_ratio_val)
                i = i[:n]
                v = v[:n]
            elif split == 'test' and n_ratio_val is not None:
                n = int(i.shape[0] * n_ratio_val)
                i = i[n:]
                v = v[n:]
            self.images += [i]
            self.voxels += [v]
            self.labels += [torch.ones(i.shape[0]) * ci]

        self.images = np.concatenate(self.images, axis=0)
        self.images = torch.from_numpy(self.images.astype('float32') / 255.)
        self.voxels = np.concatenate(self.voxels, axis=0)
        self.voxels = torch.from_numpy(self.voxels.astype('float32'))
        self.labels = torch.cat(self.labels, dim=0)

        # possible view points
        distances = torch.ones(24).float() * self.distance
        elevations = torch.ones(24).float() * self.elevation
        self.possible_viewpoints = srf.get_points_from_angles(distances, elevations, -torch.arange(24) * 15)
        print(f'{split} samples: {len(self)}')

    def __len__(self):
        if isinstance(self.images, list):
            return len(self.images)
        return self.images.shape[0]

    def __getitem__(self, idx, vp_idx=None, vp_idx_b=None):
        # image A
        images_a, viewpoints_a, viewpoint_id_a = self.get_random_viewpoint(idx, vp_idx)
        # image B
        images_b, viewpoints_b, viewpoint_id_b = self.get_random_viewpoint(idx, vp_idx_b)
        return {'images_a': images_a, 'viewpoints_a': viewpoints_a,
                'object_id_a': idx, 'viewpoint_id_a': viewpoint_id_a,
                'images_b': images_b, 'viewpoints_b': viewpoints_b,
                'object_id_b': idx, 'viewpoint_id_b': viewpoint_id_b}

    def insert_images(self, images):
        self.images = torch.cat([self.images, images], dim=0)

    def pop_indices(self, ind_list):
        selected_images = self.images[ind_list]
        keep_idx = np.delete(np.arange(self.images.shape[0]), ind_list)
        self.images = self.images[keep_idx]
        # return list(np.delete(arr, id_to_del))
        return selected_images

    def get_random_viewpoint(self, idx, vp_idx=None):
        if vp_idx is None:
            viewpoint_id = np.random.randint(0, 24)
        else:
            viewpoint_id = vp_idx
        # get image and viewpoint
        images = self.images[idx][viewpoint_id]
        # get viewpoint
        viewpoints = srf.get_points_from_angles(self.distance, self.elevation, -viewpoint_id * 15)
        return images, torch.as_tensor(viewpoints), viewpoint_id

    def get_all_batches_for_evaluation(self, batch_size, class_id):
        assert self.images.shape[0] == self.voxels.shape[0]
        ci = self.class_ids.index(class_id)
        ind_ci = self.labels == ci
        im_cls = self.images[ind_ci]
        vx_cls = self.voxels[ind_ci]
        data_ids = np.arange(im_cls.shape[0])
        viewpoint_ids = np.tile(np.arange(24), data_ids.size)
        data_ids = np.repeat(data_ids, 24) * 24 + viewpoint_ids
        distances = torch.ones(data_ids.size).float() * self.distance
        elevations = torch.ones(data_ids.size).float() * self.elevation
        viewpoints_all = srf.get_points_from_angles(distances, elevations,
                                                    -torch.from_numpy(viewpoint_ids).float() * 15)
        shape = im_cls.shape[-3:]
        images = im_cls.view(-1, *shape)
        shape = vx_cls.shape[-3:]
        voxels = vx_cls.view(-1, *shape)
        for i in range((data_ids.size - 1) // batch_size + 1):
            im = images[data_ids[i * batch_size:(i + 1) * batch_size]]
            vx = voxels[data_ids[i * batch_size:(i + 1) * batch_size] // 24]
            yield im, vx


class Transform:
    def __init__(self):
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.4, contrast=0.4,
                                        saturation=0.2, hue=0.1)],
                p=0.8
            ),
            transforms.RandomGrayscale(p=0.2),
            GaussianBlur(p=1.0),
            Solarization(p=0.0),
            transforms.ToTensor(),
            # transforms.Normalize(mean=[0.485, 0.456, 0.406],
            #                      std=[0.229, 0.224, 0.225])
        ])
        self.transform_prime = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224, interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.4, contrast=0.4,
                                        saturation=0.2, hue=0.1)],
                p=0.8
            ),
            transforms.RandomGrayscale(p=0.2),
            GaussianBlur(p=0.1),
            Solarization(p=0.2),
            transforms.ToTensor(),
            # transforms.Normalize(mean=[0.485, 0.456, 0.406],
            #                      std=[0.229, 0.224, 0.225])
        ])

    def __call__(self, x):
        y1 = self.transform(x)
        y2 = self.transform_prime(x)
        return y1, y2


class GaussianBlur(object):
    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        if random.random() < self.p:
            sigma = random.random() * 1.9 + 0.1
            return img.filter(ImageFilter.GaussianBlur(sigma))
        else:
            return img


class Solarization(object):
    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        if random.random() < self.p:
            return ImageOps.solarize(img)
        else:
            return img
src/datasets.py
8,743
ind2class = {key: value for (value, key) in enumerate(self.class_ids)} assert n_ratio_val is not None train get only first n unlabeled get only first n positible view points image A image B return list(np.delete(arr, id_to_del)) get image and viewpoint get viewpoint transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
460
en
0.561961
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate sleet probability."""

from improver import cli


@cli.clizefy
@cli.with_output
def process(snow: cli.inputcube, rain: cli.inputcube):
    """Calculate sleet probability.

    Calculates the sleet probability using the
    calculate_sleet_probability plugin.

    Args:
        snow (iris.cube.Cube):
            An iris Cube of the probability of snow.
        rain (iris.cube.Cube):
            An iris Cube of the probability of rain.

    Returns:
        iris.cube.Cube:
            Returns a cube with the probability of sleet.
    """
    from improver.calculate_sleet_prob import calculate_sleet_probability

    result = calculate_sleet_probability(snow, rain)
    return result
improver/cli/sleet_probability.py
2,396
Calculate sleet probability. Calculates the sleet probability using the calculate_sleet_probability plugin. Args: snow (iris.cube.Cube): An iris Cube of the probability of snow. rain (iris.cube.Cube): An iris Cube of the probability of rain. Returns: iris.cube.Cube: Returns a cube with the probability of sleet. Script to calculate sleet probability. !/usr/bin/env python -*- coding: utf-8 -*- ----------------------------------------------------------------------------- (C) British Crown Copyright 2017-2020 Met Office. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2,014
en
0.833927
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import numpy as np

from typing import Dict, Tuple

from tensortrade.base import Identifiable
from tensortrade.base.exceptions import InsufficientFundsForAllocation
from tensortrade.instruments import Quantity


class Wallet(Identifiable):
    """A wallet stores the balance of a specific instrument on a specific exchange."""

    def __init__(self, exchange: 'Exchange', quantity: 'Quantity'):
        self._exchange = exchange
        self._instrument = quantity.instrument
        self._balance = quantity
        self._locked = {}

    @classmethod
    def from_tuple(cls, wallet_tuple: Tuple['Exchange', 'Instrument', float]):
        exchange, instrument, balance = wallet_tuple
        return cls(exchange, Quantity(instrument, balance))

    @property
    def exchange(self) -> 'Exchange':
        return self._exchange

    @exchange.setter
    def exchange(self, exchange: 'Exchange'):
        raise ValueError("You cannot change a Wallet's Exchange after initialization.")

    @property
    def instrument(self) -> 'Instrument':
        return self._instrument

    @instrument.setter
    def instrument(self, instrument: 'Exchange'):
        raise ValueError("You cannot change a Wallet's Instrument after initialization.")

    @property
    def balance(self) -> 'Quantity':
        """The total balance of the wallet available for use."""
        return self._balance

    @balance.setter
    def balance(self, balance: 'Quantity'):
        self._balance = balance

    @property
    def locked_balance(self) -> 'Quantity':
        """The total balance of the wallet locked in orders."""
        locked_balance = Quantity(self.instrument, 0)
        for quantity in self.locked.values():
            locked_balance += quantity.size
        return locked_balance

    @property
    def total_balance(self) -> 'Quantity':
        """The total balance of the wallet, both available for use and locked in orders."""
        total_balance = self._balance
        for quantity in self.locked.values():
            total_balance += quantity.size
        return total_balance

    @property
    def locked(self) -> Dict[str, 'Quantity']:
        return self._locked

    def deallocate(self, path_id: str):
        if path_id in self.locked.keys():
            quantity = self.locked.pop(path_id, None)
            if quantity is not None:
                self += quantity.size * self.instrument

    def __iadd__(self, quantity: 'Quantity') -> 'Wallet':
        if quantity.is_locked:
            if quantity.path_id not in self.locked.keys():
                self._locked[quantity.path_id] = quantity
            else:
                self._locked[quantity.path_id] += quantity
        else:
            self._balance += quantity
        return self

    def __isub__(self, quantity: 'Quantity') -> 'Wallet':
        if quantity.is_locked and self.locked[quantity.path_id]:
            if quantity > self.locked[quantity.path_id]:
                raise InsufficientFundsForAllocation(self.locked[quantity.path_id], quantity.size)
            self._locked[quantity.path_id] -= quantity
        elif not quantity.is_locked:
            if quantity > self._balance:
                raise InsufficientFundsForAllocation(self.balance, quantity.size)
            self._balance -= quantity
        return self

    def __str__(self):
        return '<Wallet: balance={}, locked={}>'.format(self.balance, self.locked_balance)

    def __repr__(self):
        return str(self)
tensortrade/wallets/wallet.py
4,074
A wallet stores the balance of a specific instrument on a specific exchange. The total balance of the wallet available for use. The total balance of the wallet locked in orders. The total balance of the wallet, both available for use and locked in orders. Copyright 2019 The TensorTrade Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
818
en
0.860811
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('mig_main', '0003_officerposition_position_type'),
    ]

    operations = [
        migrations.CreateModel(
            name='Committee',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
mig_main/migrations/0004_committee.py
736
-*- coding: utf-8 -*-
21
en
0.767281
# -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <jev.kuznetsov@gmail.com>
# License: BSD
"""
Toolset working with yahoo finance data

This module includes functions for easy access to YahooFinance data

Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file

Classes
---------
- `HistData` a class for working with multiple symbols

"""

from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os

from extra import ProgressBar


def parseStr(s):
    ''' convert string to a float or string '''
    f = s.strip()
    if f[0] == '"':
        return f.strip('"')
    elif f == 'N/A':
        return np.nan
    else:
        try:  # try float conversion
            prefixes = {'M': 1e6, 'B': 1e9}
            prefix = f[-1]
            if prefix in prefixes:  # do we have a Billion/Million character?
                return float(f[:-1]) * prefixes[prefix]
            else:  # no, convert to float directly
                return float(f)
        except ValueError:  # failed, return original string
            return s


class HistData(object):
    ''' a class for working with yahoo finance data '''
    def __init__(self, autoAdjust=True):
        self.startDate = (2008, 1, 1)
        self.autoAdjust = autoAdjust
        self.wp = WidePanel()

    def load(self, dataFile):
        """load data from HDF"""
        if os.path.exists(dataFile):
            store = HDFStore(dataFile)
            symbols = [str(s).strip('/') for s in store.keys()]
            data = dict(zip(symbols, [store[symbol] for symbol in symbols]))
            self.wp = WidePanel(data)
            store.close()
        else:
            raise IOError('Data file does not exist')

    def save(self, dataFile):
        """ save data to HDF"""
        print 'Saving data to', dataFile
        store = HDFStore(dataFile)
        for symbol in self.wp.items:
            store[symbol] = self.wp[symbol]
        store.close()

    def downloadData(self, symbols='all'):
        ''' get data from yahoo '''
        if symbols == 'all':
            symbols = self.symbols
        # store = HDFStore(self.dataFile)
        p = ProgressBar(len(symbols))
        for idx, symbol in enumerate(symbols):
            try:
                df = getHistoricData(symbol, self.startDate, verbose=False)
                if self.autoAdjust:
                    df = _adjust(df, removeOrig=True)
                if len(self.symbols) == 0:
                    self.wp = WidePanel({symbol: df})
                else:
                    self.wp[symbol] = df
            except Exception, e:
                print e
            p.animate(idx + 1)

    def getDataFrame(self, field='close'):
        ''' return a slice on wide panel for a given field '''
        return self.wp.minor_xs(field)

    @property
    def symbols(self):
        return self.wp.items.tolist()

    def __repr__(self):
        return str(self.wp)


def getQuote(symbols):
    ''' get current yahoo quote, return a DataFrame '''
    # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
    if not isinstance(symbols, list):
        symbols = [symbols]
    header = ['symbol', 'last', 'change_pct', 'PE', 'time', 'short_ratio',
              'prev_close', 'eps', 'market_cap']
    request = str.join('', ['s', 'l1', 'p2', 'r', 't1', 's7', 'p', 'e', 'j1'])
    data = dict(zip(header, [[] for i in range(len(header))]))

    urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+', symbols), request)

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s

    for line in lines:
        fields = line.strip().split(',')
        # print fields, len(fields)
        for i, field in enumerate(fields):
            data[header[i]].append(parseStr(field))

    idx = data.pop('symbol')
    return DataFrame(data, index=idx)


def _historicDataUrl(symbol, sDate=(1990, 1, 1), eDate=date.today().timetuple()[0:3]):
    """
    generate url

    symbol: Yahoo finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    """
    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
        format(symbol.upper(), sDate[1] - 1, sDate[2], sDate[0], eDate[1] - 1, eDate[2], eDate[0])
    return urlStr


def getHistoricData(symbol, sDate=(1990, 1, 1), eDate=date.today().timetuple()[0:3], verbose=True):
    """
    get data from Yahoo finance and return pandas dataframe

    symbol: Yahoo finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    """
    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
        format(symbol.upper(), sDate[1] - 1, sDate[2], sDate[0], eDate[1] - 1, eDate[2], eDate[0])

    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s

    dates = []
    data = [[] for i in range(6)]  # high
    # header : Date,Open,High,Low,Close,Volume,Adj Close
    for line in lines[1:]:
        # print line
        fields = line.rstrip().split(',')
        dates.append(datetime.strptime(fields[0], '%Y-%m-%d'))
        for i, field in enumerate(fields[1:]):
            data[i].append(float(field))

    idx = Index(dates)
    data = dict(zip(['open', 'high', 'low', 'close', 'volume', 'adj_close'], data))

    # create a pandas dataframe structure
    df = DataFrame(data, index=idx).sort()

    if verbose:
        print 'Got %i days of data' % len(df)

    return df


def _adjust(df, removeOrig=False):
    ''' adjust hist data based on adj_close field '''
    c = df['close'] / df['adj_close']
    df['adj_open'] = df['open'] / c
    df['adj_high'] = df['high'] / c
    df['adj_low'] = df['low'] / c

    if removeOrig:
        df = df.drop(['open', 'close', 'high', 'low'], axis=1)
        renames = dict(zip(['adj_open', 'adj_close', 'adj_high', 'adj_low'],
                           ['open', 'close', 'high', 'low']))
        df = df.rename(columns=renames)

    return df


def getScreenerSymbols(fileName):
    ''' read symbols from a .csv saved by yahoo stock screener '''
    with open(fileName, 'r') as fid:
        lines = fid.readlines()

    symbols = []
    for line in lines[3:]:
        fields = line.strip().split(',')
        field = fields[0].strip()
        if len(field) > 0:
            symbols.append(field)
    return symbols
lib/yahooFinance.py
7,224
-*- coding: utf-8 -*- Author: Jev Kuznetsov <jev.kuznetsov@gmail.com> License: BSD try float conversion do we have a Billion/Million character? no, convert to float directly failed, return original stringstore = HDFStore(self.dataFile) for codes see: http://www.gummy-stuff.org/Yahoo-data.htmprint fields, len(fields)high header : Date,Open,High,Low,Close,Volume,Adj Closeprint line create a pandas dataframe structure
426
en
0.430561
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance

"""Yahoo! Finance market data downloader (+fix for Pandas Datareader)"""

from setuptools import setup, find_packages
# from codecs import open
import io
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with io.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='yfinance',
    version="0.1.46",
    description='Yahoo! Finance market data downloader',
    long_description=long_description,
    url='https://github.com/ranaroussi/yfinance',
    author='Ran Aroussi',
    author_email='ran@aroussi.com',
    license='Apache',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        # 'Development Status :: 3 - Alpha',
        # 'Development Status :: 4 - Beta',
        'Development Status :: 5 - Production/Stable',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Office/Business :: Financial',
        'Topic :: Office/Business :: Financial :: Investment',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    platforms=['any'],
    keywords='pandas, yahoo finance, pandas datareader',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
    install_requires=['pandas>=0.24', 'numpy>=1.15',
                      'requests>=2.20', 'multitasking>=0.0.7'],
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
)
setup.py
2,074
Yahoo! Finance market data downloader (+fix for Pandas Datareader) !/usr/bin/env python -*- coding: UTF-8 -*- Yahoo! Finance market data downloader (+fix for Pandas Datareader) https://github.com/ranaroussi/yfinance from codecs import open Get the long description from the README file 'Development Status :: 3 - Alpha', 'Development Status :: 4 - Beta',
355
en
0.538845
""" render_fmo.py renders obj file to rgb image with fmo model Aviable function: - clear_mash: delete all the mesh in the secene - scene_setting_init: set scene configurations - node_setting_init: set node configurations - render: render rgb image for one obj file and one viewpoint - render_obj: wrapper function for render() render - init_all: a wrapper function, initialize all configurations = set_image_path: reset defualt image output folder author baiyu modified by rozumden """ import sys import os import random import pickle import bpy import glob import numpy as np from mathutils import Vector from mathutils import Euler import cv2 from PIL import Image from skimage.draw import line_aa from scipy import signal from skimage.measure import regionprops # import moviepy.editor as mpy from array2gif import write_gif abs_path = os.path.abspath(__file__) sys.path.append(os.path.dirname(abs_path)) from render_helper import * from settings import * import settings import pdb def renderTraj(pars, H): ## Input: pars is either 2x2 (line) or 2x3 (parabola) if pars.shape[1] == 2: pars = np.concatenate( (pars, np.zeros((2,1))),1) ns = 2 else: ns = 5 ns = np.max([2, ns]) rangeint = np.linspace(0,1,ns) for timeinst in range(rangeint.shape[0]-1): ti0 = rangeint[timeinst] ti1 = rangeint[timeinst+1] start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0) end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1) start = np.round(start).astype(np.int32) end = np.round(end).astype(np.int32) rr, cc, val = line_aa(start[0], start[1], end[0], end[1]) valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0)) rr = rr[valid] cc = cc[valid] val = val[valid] if len(H.shape) > 2: H[rr, cc, 0] = 0 H[rr, cc, 1] = 0 H[rr, cc, 2] = val else: H[rr, cc] = val return H def open_log(temp_folder = g_temp): # redirect output to log file logfile = os.path.join(temp_folder,'blender_render.log') try: os.remove(logfile) except OSError: pass open(logfile, 'a').close() old = os.dup(1) sys.stdout.flush() os.close(1) os.open(logfile, os.O_WRONLY) return old def close_log(old): # disable output redirection os.close(1) os.dup(old) os.close(old) def clear_mesh(): """ clear all meshes in the secene """ bpy.ops.object.select_all(action='DESELECT') for obj in bpy.data.objects: if obj.type == 'MESH': obj.select = True bpy.ops.object.delete() for block in bpy.data.meshes: if block.users == 0: bpy.data.meshes.remove(block) for block in bpy.data.materials: if block.users == 0: bpy.data.materials.remove(block) for block in bpy.data.textures: if block.users == 0: bpy.data.textures.remove(block) for block in bpy.data.images: if block.users == 0: bpy.data.images.remove(block) def scene_setting_init(use_gpu): """initialize blender setting configurations """ sce = bpy.context.scene.name bpy.data.scenes[sce].render.engine = g_engine_type bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent #output bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension if g_ambient_light: world = bpy.data.worlds['World'] world.use_nodes = True bg = world.node_tree.nodes['Background'] bg.inputs[0].default_value[:3] = g_bg_color bg.inputs[1].default_value = 1.0 #dimensions 
bpy.data.scenes[sce].render.resolution_x = g_resolution_x bpy.data.scenes[sce].render.resolution_y = g_resolution_y bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage if use_gpu: bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices) print('Number of devices {}'.format(ndev)) for ki in range(2,ndev): bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA' # bpy.types.CyclesRenderSettings.device = 'GPU' bpy.data.scenes[sce].cycles.device = 'GPU' def node_setting_init(): bpy.context.scene.use_nodes = True tree = bpy.context.scene.node_tree links = tree.links for node in tree.nodes: tree.nodes.remove(node) render_layer_node = tree.nodes.new('CompositorNodeRLayers') image_output_node = tree.nodes.new('CompositorNodeOutputFile') image_output_node.base_path = g_syn_rgb_folder links.new(render_layer_node.outputs[0], image_output_node.inputs[0]) # image_output_node = bpy.context.scene.node_tree.nodes[1] image_output_node.base_path = g_temp image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder # def render(obj_path, viewpoint, temp_folder): """render rbg image render a object rgb image by a given camera viewpoint and choose random image as background, only render one image at a time. Args: obj_path: a string variable indicate the obj file path viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance) """ vp = viewpoint cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance) cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt) cam_obj = bpy.data.objects['Camera'] cam_obj.location[0] = cam_location[0] cam_obj.location[1] = cam_location[1] cam_obj.location[2] = cam_location[2] cam_obj.rotation_euler[0] = cam_rot[0] cam_obj.rotation_euler[1] = cam_rot[1] cam_obj.rotation_euler[2] = cam_rot[2] if not os.path.exists(g_syn_rgb_folder): os.mkdir(g_syn_rgb_folder) obj = bpy.data.objects['model_normalized'] ni = g_fmo_steps maxlen = 0.5 maxrot = 1.57/6 tri = 0 # rot_base = np.array([math.pi/2,0,0]) while tri <= g_max_trials: do_repeat = False tri += 1 if not g_apply_texture: for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue for tempi in range(len(bpy.data.objects[oi].data.materials)): if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0: return True, True ## transparent object los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen))) loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni rot_base = np.array((random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi))) rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni old = open_log(temp_folder) for ki in [0, ni-1]+list(range(1,ni-1)): for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue 
bpy.data.objects[oi].location = los_start + loc_step*ki bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki)) bpy.context.scene.frame_set(ki + 1) bpy.ops.render.render(write_still=True) #start rendering if ki == 0 or ki == (ni-1): Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0 is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0 if is_border: if ki == 0: close_log(old) return False, True ## sample different starting viewpoint else: do_repeat = True ## just sample another motion direction if do_repeat: break close_log(old) if do_repeat == False: break if do_repeat: ## sample different starting viewpoint return False, True return False, False def make_fmo(path, gt_path, video_path): n_im = 5 background_images = os.listdir(g_background_image_path) seq_name = random.choice(background_images) seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.jpg")) if len(seq_images) <= n_im: seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,"*.png")) seq_images.sort() bgri = random.randint(n_im,len(seq_images)-1) bgr_path = seq_images[bgri] B0 = cv2.imread(bgr_path)/255 B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC) B[B > 1] = 1 B[B < 0] = 0 FH = np.zeros(B.shape) MH = np.zeros(B.shape[:2]) pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,)) centroids = np.zeros((2,g_fmo_steps)) for ki in range(g_fmo_steps): FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max props = regionprops((FM[:,:,-1,ki]>0).astype(int)) if len(props) != 1: return False centroids[:,ki] = props[0].centroid for ki in range(g_fmo_steps): F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki] M = FM[:,:,-1,ki] if ki < g_fmo_steps-1: pars[:,1] = centroids[:,ki+1] - centroids[:,ki] H = renderTraj(pars, np.zeros(B.shape[:2])) H /= H.sum()*g_fmo_steps for kk in range(3): FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same') MH += signal.fftconvolve(H, M, mode='same') Im = FH + (1 - MH)[:,:,np.newaxis]*B Im[Im > 1] = 1 Im[Im < 0] = 0 if g_skip_low_contrast: Diff = np.sum(np.abs(Im - B),2) meanval = np.mean(Diff[MH > 0.05]) print("Contrast {}".format(meanval)) if meanval < 0.2: return False if g_skip_small: sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1]) print("Size percentage {}".format(sizeper)) if sizeper < 0.05: return False Im = Im[:,:,[2,1,0]] Ims = Image.fromarray((Im * 255).astype(np.uint8)) Ims.save(path) Ball = np.zeros(B.shape+(n_im,)) Ball[:,:,:,0] = B for ki in range(1,n_im): bgrki_path = seq_images[bgri-ki] Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC) Ball[Ball > 1] = 1 Ball[Ball < 0] = 0 Bmed = np.median(Ball,3) Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png')) Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png')) # Ims.save(os.path.join(g_temp,"I.png")) # Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png")) # Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png")) # Image.fromarray((M * 
255).astype(np.uint8)).save(os.path.join(g_temp,"M.png")) # Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png")) # Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png")) if False: Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:]) Fwr = (Fwr * 255).astype(np.uint8) # Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255 out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*"MJPG"), 6, (F.shape[1],F.shape[0]),True) for ki in range(g_fmo_steps): out.write(Fwr[:,:,:,ki]) out.release() return True def render_obj(obj_path, path, objid, obj_name, temp_folder): """ render one obj file by a given viewpoint list a wrapper function for render() Args: obj_path: a string variable indicate the obj file path """ vps_path = random.sample(g_view_point_file, 1)[0] vps = list(load_viewpoint(vps_path)) random.shuffle(vps) save_path = os.path.join(path,"{}_{:04d}.png".format(obj_name,objid)) gt_path = os.path.join(path,"GT","{}_{:04d}".format(obj_name,objid)) video_path = os.path.join(path,"{}_{:04d}.avi".format(obj_name,objid)) if not os.path.exists(gt_path): os.mkdir(gt_path) image_output_node = bpy.context.scene.node_tree.nodes[1] image_output_node.base_path = gt_path for imt in bpy.data.images: bpy.data.images.remove(imt) if g_apply_texture: for oi in range(len(bpy.data.objects)): if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP': continue bpy.context.scene.objects.active = bpy.data.objects[oi] # pdb.set_trace() # for m in bpy.data.materials: # bpy.data.materials.remove(m) # bpy.ops.object.material_slot_remove() bpy.ops.object.editmode_toggle() bpy.ops.uv.cube_project() bpy.ops.object.editmode_toggle() texture_images = os.listdir(g_texture_path) texture = random.choice(texture_images) tex_path = os.path.join(g_texture_path,texture) # mat = bpy.data.materials.new(texture) # mat.use_nodes = True # nt = mat.node_tree # nodes = nt.nodes # links = nt.links # # Image Texture # textureNode = nodes.new("ShaderNodeTexImage") # textureNode.image = bpy.data.images.load(tex_path) # links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color']) # mat.specular_intensity = 0 # bpy.data.objects[oi].active_material = mat # print(bpy.data.objects[oi].active_material) for mat in bpy.data.materials: nodes = mat.node_tree.nodes links = mat.node_tree.links textureNode = nodes.new("ShaderNodeTexImage") textureNode.image = bpy.data.images.load(tex_path) links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color']) # print(bpy.data.objects[oi].active_material) tri = 0 while tri <= g_max_trials: tri += 1 vp = random.sample(vps, 1)[0] sample_different_object, sample_different_vp = render(obj_path, vp, temp_folder) if sample_different_vp: if sample_different_object: print('Transparent object!') return False print('Rendering failed, repeating') continue success = make_fmo(save_path, gt_path, video_path) if success: return True print('Making FMO failed, repeating') return False def init_all(): """init everything we need for rendering an image """ scene_setting_init(g_gpu_render_enable) node_setting_init() cam_obj = bpy.data.objects['Camera'] cam_obj.rotation_mode = g_rotation_mode if g_render_light: bpy.data.objects['Lamp'].data.energy = 50 bpy.ops.object.lamp_add(type='SUN') bpy.data.objects['Sun'].data.energy = 5 ### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA init_all() argv = sys.argv argv = argv[argv.index("--") + 1:] start_index = int(argv[0]) step_index = int(argv[1]) 
print('Start index {}, step index {}'.format(start_index, step_index)) temp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/' for obj_name in g_render_objs[start_index:(start_index+step_index)]: print("Processing object {}".format(obj_name)) obj_folder = os.path.join(g_syn_rgb_folder, obj_name) if not os.path.exists(obj_folder): os.makedirs(obj_folder) if not os.path.exists(os.path.join(obj_folder,"GT")): os.mkdir(os.path.join(obj_folder,"GT")) num = g_shapenet_categlory_pair[obj_name] search_path = os.path.join(g_shapenet_path, num, '**','*.obj') pathes = glob.glob(search_path, recursive=True) random.shuffle(pathes) objid = 1 tri = 0 while objid <= g_number_per_category: print(" instance {}".format(objid)) clear_mesh() path = random.sample(pathes, 1)[0] old = open_log(temp_folder) bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_split_groups=False, use_split_objects=True) # bpy.ops.import_scene.obj(filepath=path) close_log(old) #combine_objects() #scale_objects(0.5) result = render_obj(path, obj_folder, objid, obj_name, temp_folder) if result: objid += 1 tri = 0 else: print('Error! Rendering another object from the category!') tri += 1 if tri > g_max_trials: print('No object find in the category!!!!!!!!!') break
renderer/render_fmo.py
18,376
clear all meshes in the secene init everything we need for rendering an image render rbg image render a object rgb image by a given camera viewpoint and choose random image as background, only render one image at a time. Args: obj_path: a string variable indicate the obj file path viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance) render one obj file by a given viewpoint list a wrapper function for render() Args: obj_path: a string variable indicate the obj file path initialize blender setting configurations render_fmo.py renders obj file to rgb image with fmo model Aviable function: - clear_mash: delete all the mesh in the secene - scene_setting_init: set scene configurations - node_setting_init: set node configurations - render: render rgb image for one obj file and one viewpoint - render_obj: wrapper function for render() render - init_all: a wrapper function, initialize all configurations = set_image_path: reset defualt image output folder author baiyu modified by rozumden import moviepy.editor as mpy Input: pars is either 2x2 (line) or 2x3 (parabola) redirect output to log file disable output redirectionoutputdimensionsonly cycles engine can use gpu bpy.types.CyclesRenderSettings.device = 'GPU' image_output_node = bpy.context.scene.node_tree.nodes[1] blender placeholder rot_base = np.array([math.pi/2,0,0]) transparent objectstart rendering sample different starting viewpoint just sample another motion direction sample different starting viewpoint Ims.save(os.path.join(g_temp,"I.png")) Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"FH.png")) Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,"MH.png")) Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,"M.png")) Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"F.png")) Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,"B.png")) Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255 pdb.set_trace() for m in bpy.data.materials: bpy.data.materials.remove(m) bpy.ops.object.material_slot_remove() mat = bpy.data.materials.new(texture) mat.use_nodes = True nt = mat.node_tree nodes = nt.nodes links = nt.links Image Texture textureNode = nodes.new("ShaderNodeTexImage") textureNode.image = bpy.data.images.load(tex_path) links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color']) mat.specular_intensity = 0 bpy.data.objects[oi].active_material = mat print(bpy.data.objects[oi].active_material) print(bpy.data.objects[oi].active_material) YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA bpy.ops.import_scene.obj(filepath=path)combine_objects()scale_objects(0.5)
2,792
en
0.457395
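The core of make_fmo() above is the blur-and-composite model I = sum_k H_k*F_k + (1 - sum_k H_k*M_k)·B, where F_k and M_k are the rendered appearance and alpha of sub-frame k, H_k is the per-step trajectory kernel and B is the background. The snippet below is a minimal, self-contained sketch of that compositing step with synthetic arrays standing in for the rendered frames and a single-pixel kernel standing in for renderTraj(); it is not the script's actual data path.

```python
import numpy as np
from scipy import signal

H, W, steps = 64, 64, 8
B = np.random.rand(H, W, 3)            # background image, values in [0, 1]
FH = np.zeros((H, W, 3))               # accumulated blurred appearance
MH = np.zeros((H, W))                  # accumulated blurred alpha

for k in range(steps):
    # Synthetic sharp object F with binary mask M: a small square moving to the right.
    F = np.zeros((H, W, 3))
    M = np.zeros((H, W))
    r, c = 20, 10 + 4 * k
    F[r:r + 8, c:c + 8] = 0.9
    M[r:r + 8, c:c + 8] = 1.0

    # Per-step blur kernel. Here it is a single pixel (no intra-step motion);
    # render_fmo.py instead rasterises a short line segment with renderTraj().
    Hk = np.zeros((H, W))
    Hk[H // 2, W // 2] = 1.0
    Hk /= Hk.sum() * steps             # kernels of all steps sum to 1 overall

    for ch in range(3):
        FH[:, :, ch] += signal.fftconvolve(Hk, F[:, :, ch], mode='same')
    MH += signal.fftconvolve(Hk, M, mode='same')

# Composite the blurred object over the background and clip to the valid range,
# matching the `Im = FH + (1 - MH)*B` step in make_fmo().
Im = np.clip(FH + (1.0 - MH)[:, :, np.newaxis] * B, 0.0, 1.0)
print(Im.shape, float(Im.min()), float(Im.max()))
```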
import json from urllib.parse import parse_qs from urllib.parse import urlparse from Cryptodome.PublicKey import RSA from jwkest import b64e from jwkest.jwk import RSAKey from jwkest.jwk import load_jwks from oic.extension.message import TokenIntrospectionResponse from oic.extension.signed_http_req import SignedHttpRequest from oic.oauth2 import compact from oic.utils.jwt import JWT from oic.utils.keyio import KeyBundle __author__ = 'roland' def sign_http_args(method, url, headers, body=''): p = urlparse(url) kwargs = {'path': p.path, 'host': p.netloc, 'headers': headers, 'method': method} if body: kwargs['body'] = body query_params = compact(parse_qs(p.query)) kwargs['query_params'] = query_params return kwargs class PoPCallBack(object): def __init__(self, key, alg): self.key = key self.alg = alg def __call__(self, method, url, **kwargs): try: body = kwargs['body'] except KeyError: body = None try: headers = kwargs['headers'] except KeyError: headers = {} _kwargs = sign_http_args(method, url, headers, body) shr = SignedHttpRequest(self.key) kwargs['Authorization'] = 'pop {}'.format(shr.sign(alg=self.alg, **_kwargs)) return kwargs class PoPClient(object): def __init__(self, key_size=2048, sign_alg='RS256'): self.key_size = key_size self.state2key = {} self.token2key = {} self.alg = sign_alg def update(self, msg, state, key_size=0): """ Use to 'update' the AccessToken Request. :param msg: :param state: Used to map access token response to this request :param key_size: :return: """ if not key_size: key_size = self.key_size key = RSAKey(key=RSA.generate(key_size)) self.state2key[state] = key msg['key'] = json.dumps(key.serialize()) return msg def handle_access_token_response(self, resp): """ Map access token to a keypair. :param resp: AccessTokenResponse instance """ self.token2key[resp['access_token']] = self.state2key[resp['state']] class PoPAS(object): def __init__(self, me): self.thumbprint2key = {} self.keyjar = None self.me = me def store_key(self, key): kb = KeyBundle() kb.do_keys([key]) # Store key with thumbprint as key key_thumbprint = b64e(kb.keys()[0].thumbprint('SHA-256')).decode( 'utf8') self.thumbprint2key[key_thumbprint] = key return key_thumbprint def create_access_token(self, key_thumbprint): # creating the access_token jwt_constructor = JWT(self.keyjar, iss=self.me) # Audience is myself return jwt_constructor.pack( kid='abc', cnf={'kid': key_thumbprint}, aud=self.me) def token_introspection(self, token): jwt_constructor = JWT(self.keyjar, iss=self.me) res = jwt_constructor.unpack(token) tir = TokenIntrospectionResponse(active=True) tir['key'] = json.dumps(self.thumbprint2key[res['cnf']['kid']]) return tir class PoPRS(object): def __init__(self): self.token2key = {} def store_key(self, access_token, tir): """ Store key that was returned in response from token introspection. :param access_token: The token that was introspected :param tir: TokenIntrospectionResponse instance """ key = load_jwks(json.dumps({'keys': [json.loads(tir['key'])]})) self.token2key[access_token] = key def eval_signed_http_request(self, pop_token, access_token, method, url, headers, body=''): kwargs = sign_http_args(method, url, headers, body) shr = SignedHttpRequest(self.token2key[access_token][0]) return shr.verify(signature=pop_token, strict_query_params_verification=True, strict_headers_verification=True, **kwargs)
src/oic/extension/pop.py
4,202
Map access token to a keypair. :param resp: AccessTokenResponse instance Store key that was returned in response from token introspection. :param access_token: The token that was introspected :param tir: TokenIntrospectionResponse instance Use to 'update' the AccessToken Request. :param msg: :param state: Used to map access token response to this request :param key_size: :return: Store key with thumbprint as key creating the access_token Audience is myself
465
en
0.853964
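To make the binding between PoPClient and PoPAS concrete, here is a short sketch of just the key-handling part of pop.py: the client generates an RSA key and serialises its public JWK into the token request, and the AS stores that key under the base64url-encoded SHA-256 thumbprint that later appears in the token's cnf claim. It reuses only calls already visible above and assumes the same packages (pycryptodomex, pyjwkest, oic) are installed; the JWT packing and signed-request verification are left out, and passing the serialized JWK to do_keys() is my reading of what the AS receives.

```python
import json

from Cryptodome.PublicKey import RSA
from jwkest import b64e
from jwkest.jwk import RSAKey
from oic.utils.keyio import KeyBundle

# Client side (PoPClient.update): generate a key pair and put the public JWK
# into the access token request under the 'key' member.
key = RSAKey(key=RSA.generate(2048))
token_request_key = json.dumps(key.serialize())

# AS side (PoPAS.store_key): index the key by its SHA-256 thumbprint; the same
# value is what create_access_token() places in the token's cnf claim.
kb = KeyBundle()
kb.do_keys([key.serialize()])
key_thumbprint = b64e(kb.keys()[0].thumbprint('SHA-256')).decode('utf8')

print('JWK sent with the token request:', token_request_key[:60], '...')
print('cnf kid bound into the access token:', key_thumbprint)
```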
#!/usr/bin/python from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate from subprocess import * from time import sleep, strftime from datetime import datetime from mpd import * import threading import signal import sys import os from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer import json PLAY=0 PAUSE=1 STOP=2 VOL=3 LCDon=4 lcd = Adafruit_CharLCDPlate() # create LCD object client = MPDClient() # create MPD client object lock = threading.Lock() home=os.path.dirname(os.path.realpath(__file__)) class pimp3clock_HTTPRequesthandler(BaseHTTPRequestHandler): def do_GET(self): try: if '?' in self.path: self.path,q = self.path.split('?', 1) if self.path.endswith(".js") or self.path.endswith(".css") or self.path.endswith(".png") or self.path.endswith(".gif") or self.path.endswith(".html"): f = open(home + "/web/" + self.path) self.send_response(200) if self.path.endswith(".js"): self.send_header('Content-type', 'text/javascript') elif self.path.endswith(".css"): self.send_header('Content-type', 'text/css') elif self.path.endswith(".png"): self.send_header('Content-type', 'image/png') elif self.path.endswith(".gif"): self.send_header('Content-type', 'image/gif') elif self.path.endswith(".html"): self.send_header('Content-type', 'text/html') self.end_headers() self.wfile.write(f.read()) f.close() return elif self.path.endswith(".json"): self.send_response(200) self.send_header('Content-type', 'text/javascript') self.end_headers() if self.path.endswith("status.json"): lock.acquire() song = client.currentsong() status = client.status() lock.release() self.wfile.write(json.dumps({'song': song, 'status': status})) return elif self.path.endswith("select.json"): lock.acquire() status = client.status() if status['state'] == "stop": client.play() elif status['state'] == "play": client.pause(1) elif status['state'] == "pause": client.pause(0) lock.release() self.wfile.write(json.dumps("OK")) return elif self.path.endswith("next.json"): lock.acquire() client.next() lock.release() self.wfile.write(json.dumps("OK")) return elif self.path.endswith("previous.json"): lock.acquire() client.previous() lock.release() self.wfile.write(json.dumps("OK")) return elif self.path.endswith("volume.json"): key, value = q.split('=',1) if (value < 1): value=1 lock.acquire() client.setvol(value) lock.release() self.wfile.write(json.dumps("OK")) return elif self.path.endswith("update.json"): lock.acquire() mpd_update() lock.release() return elif self.path.endswith("background.json"): lock.acquire() key, value = q.split('=',1) LCDon=int(value) lcd.backlight(LCDon) lock.release() return return else: self.send_response(301) self.send_header('Location', 'index.html') self.end_headers() return return except IOError: self.send_error(404,'File Not Found: {0} (Home: {1})'.format(self.path, home)) def do_POST(self): try: print "POST" except: pass def mpd_update(): # Load Database into current playlist client.update() client.clear() database=client.listall("/") for (i) in range(len(database)): if 'file' in database[i]: client.add(database[i]['file']) client.random(1) client.shuffle(1) client.crossfade(2) def display_lcd(title_a,st_a,vol_a): LCDoff=lcd.OFF LCDState=LCDoff LCDOffDelay=30 LCDOffCountdown=LCDOffDelay lcd.backlight(LCDon) lcd.clear() lcd.begin(16,1) play=[ 0b10000, 0b11000, 0b11100, 0b11110, 0b11100, 0b11000, 0b10000, 0b00000 ] lcd.createChar(PLAY,play) pause=[ 0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011, 0b11011 ] lcd.createChar(PAUSE,pause) stop=[ 0b00000, 0b11111, 0b10001, 0b10001, 0b10001, 
0b10001, 0b11111, 0b00000 ] lcd.createChar(STOP,stop) t=0 i=0 fr=1 oldtitle="" while 1: lock.acquire() vol=[] vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b00000]) vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b00000,0b10000,0b10000]) vol.append([0b00000,0b00000,0b00000,0b00000,0b00000,0b01000,0b11000,0b11000]) vol.append([0b00000,0b00000,0b00000,0b00000,0b00100,0b01100,0b11100,0b11100]) vol.append([0b00000,0b00000,0b00000,0b00010,0b00110,0b01110,0b11110,0b11110]) vol.append([0b00000,0b00000,0b00001,0b00011,0b00111,0b01111,0b11111,0b11111]) volbar=int((vol_a[0]+5)/(100/5)) lcd.createChar(VOL,vol[volbar]) try: if (t % 2) == 0: lcd.home() lcd.write(VOL,True) # Special Characters lcd.message(datetime.now().strftime('%d.%b %H:%M:%S')) else: title=title_a[0] if title != oldtitle: fr=1 i=0 oldtitle=title st=st_a[0] lcd.clear() lcd.write(VOL,True) # Special Characters lcd.message(datetime.now().strftime('%d.%b %H %M %S\n')) lcd.write(st,True) # Special Characters lcd.message('%s' % (title[i:15+i]) ) if ((st == PAUSE) or (st == STOP)): LCDOffCountdown=LCDOffCountdown-1 else: if (LCDOffCountdown==0): lcd.backlight(LCDon) LCDOffCountdown=LCDOffDelay if (LCDOffCountdown < 1): lcd.backlight(LCDoff) LCDOffCountdown=0 if fr==1: i=i+1 else: i=i-1 if i>len(title)-15: fr=0 if i==0: fr=1 finally: lock.release() t=t+1 sleep(0.5) def webserver(): server.serve_forever() def main_loop(): i=0; title_a=[None] st_a=[None] vol_a=[None] title_a[0]="" st_a[0]=STOP vol_a[0]=0 display_thread = threading.Thread(target=display_lcd, args=(title_a,st_a,vol_a)) display_thread.daemon=True # Causing thread to stop when main process ends. display_thread.start() webserver_thread = threading.Thread(target=webserver, args=()) webserver_thread.daemon=True # Causing thread to stop when main process ends. 
webserver_thread.start() client.connect("localhost", 6600) # connect to localhost:6600 mpd_update() last_button=100; while 1: lock.acquire() status = client.status() vol_a[0]=int(status['volume']) lock.release() if (i % 5) == 0: lock.acquire() song = client.currentsong() lock.release() if song == {}: title_a[0]="" else: title_a[0]=song['artist'] + " - " + song['title'] if status['state'] == "stop": st_a[0]=STOP elif status['state'] == "play": st_a[0]=PLAY elif status['state'] == "pause": st_a[0]=PAUSE lock.acquire() try: button = lcd.buttons() finally: lock.release() if ((button & 1) == 1) and (last_button != button): # SELECT if status['state'] == "stop": lock.acquire() client.play() lock.release() elif status['state'] == "play": lock.acquire() client.pause(1) lock.release() elif status['state'] == "pause": lock.acquire() client.pause(0) lock.release() elif ((button & 2) == 2) and (last_button != button): # RIGHT client.next() elif (button & 4) == 4: # DOWN if int(status['volume']) >1: lock.acquire() client.setvol(int(status['volume']) - 1) lock.release() elif (button & 8) == 8: # UP if int(status['volume']) <100: lock.acquire() client.setvol(int(status['volume']) + 1) lock.release() elif ((button & 16) == 16) and (last_button != button): # LEFT lock.acquire() client.previous() lock.release() last_button=button i=i+1; sleep(0.1) def shutdown(): client.stop() client.close() # send the close command client.disconnect() # disconnect from the server lcd.clear(); lcd.stop(); def sig_handler(signum = None, frame = None): shutdown() sys.exit(0) try: for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]: signal.signal(sig, sig_handler) server = HTTPServer(('',80), pimp3clock_HTTPRequesthandler) main_loop() except (KeyboardInterrupt, SystemExit): shutdown()
pimp3clock.py
9,609
!/usr/bin/python create LCD object create MPD client object Load Database into current playlist Special Characters Special Characters Special Characters Causing thread to stop when main process ends. Causing thread to stop when main process ends. connect to localhost:6600 SELECT RIGHT DOWN UP LEFT send the close command disconnect from the server
348
en
0.815194
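pimp3clock.py shares one MPDClient between the LCD thread, the HTTP handler and the main loop, and serialises access with explicit lock.acquire()/lock.release() pairs. The sketch below expresses the SELECT-button / select.json behaviour with the lock used as a context manager instead; it assumes an MPD server on localhost:6600, exactly as the script does, and uses only python-mpd calls that already appear above.

```python
import threading
from mpd import MPDClient

lock = threading.Lock()
client = MPDClient()
client.connect("localhost", 6600)   # same host/port the script connects to

def toggle_playback():
    """stop -> play, play -> pause, pause -> resume (mirrors the SELECT handling)."""
    with lock:                      # released automatically, even if an MPD call raises
        state = client.status()['state']
        if state == "stop":
            client.play()
        elif state == "play":
            client.pause(1)
        elif state == "pause":
            client.pause(0)

toggle_playback()
```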
"""Backup handler This script is contains the backup handling functions. """ import os import time import pickle import shutil from shutil import ignore_patterns import pypianoroll import numpy as np def backup_pickle(experiment, stats): '''' Back up handling function. Arguments: experiment -- Experiment object, contains the initial sorn parameters stats -- bunch of stats stored during the simulation ''' params = experiment.init_params results_dir = experiment.results_dir files_tosave = experiment.files_tosave directory = ('backup/{}'.format(results_dir)) # creates a new directory for storing the results # sleeps for a short time to avoid conflicts when running in parallel time.sleep(np.random.rand()) for n_sim in range(1, 1000): final_dir = '{}_{}/'.format(directory, str(n_sim)) if not os.path.exists(final_dir): try: os.makedirs(final_dir) break except: pass if 'params' in files_tosave: with open(final_dir+'init_params.p', 'wb') as f: pickle.dump(params, f) if 'stats' in files_tosave: # generate MIDI track if MusicTask if hasattr(stats, 'track'): stats.track.write(final_dir+'sample.mid') # delete attributes that occupy a lot of memory space if hasattr(stats, 'input_index_readout'): del stats.input_index_readout if hasattr(stats, 'input_readout'): del stats.input_readout if hasattr(stats, 'raster_readout'): del stats.raster_readout if hasattr(stats, 't_past'): del stats.t_past with open(final_dir+'stats.p', 'wb') as f: pickle.dump(stats, f) if 'scripts' in files_tosave: # TODO: this should not need a '_' for f in ['utils', 'common', results_dir.split('_')[0]]: shutil.copytree(f, final_dir+f, ignore=ignore_patterns('*.pyc', '*.git'))
utils/backup.py
2,041
Back up handling function. Arguments: experiment -- Experiment object, contains the initial sorn parameters stats -- bunch of stats stored during the simulation Backup handler This script contains the backup handling functions. creates a new directory for storing the results sleeps for a short time to avoid conflicts when running in parallel generate MIDI track if MusicTask delete attributes that occupy a lot of memory space TODO: this should not need a '_'
470
en
0.794384
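The directory-claiming loop in backup_pickle() is the part most worth lifting out: it probes backup/<results_dir>_1, _2, ... and relies on os.makedirs() raising when another parallel run grabs the same name first. A stripped-down sketch of that pattern follows; the base name is made up for illustration.

```python
import os
import time
import numpy as np

def claim_results_dir(base='backup/example_run', max_tries=1000):
    time.sleep(np.random.rand())          # stagger parallel jobs, as backup_pickle() does
    for n_sim in range(1, max_tries):
        final_dir = '{}_{}/'.format(base, n_sim)
        if not os.path.exists(final_dir):
            try:
                os.makedirs(final_dir)    # acts as the atomic claim on the name
                return final_dir
            except OSError:
                pass                      # another process created it first; try the next index
    raise RuntimeError('no free directory name found')

print(claim_results_dir())
```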
from devices import network_devices from napalm import get_network_driver from pprint import pprint def open_napalm_connection(device): """Function to open napalm connection and return connection object""" # Copy dictionary to ensure original object is not modified device = device.copy() # Pop "platform" as this is an invalid kwarg to napalm platform = device.pop('platform') driver = get_network_driver(platform) conn = driver(**device) conn.open() return conn def main(): connections = [] for device in network_devices: conn = open_napalm_connection(device) connections.append(conn) print("\n\n") print("Print facts for all devices in connections list") print("-" * 20) for conn in connections: print() print("-" * 6) print(conn) pprint("{} facts:".format(conn.platform)) pprint(conn.get_facts()) print("-" * 6) # Close the NAPALM connection conn.close() print("\n\n") if __name__ == "__main__": main()
day3/linting/exercise1.py
1,061
Function to open napalm connection and return connection object Copy dictionary to ensure original object is not modified Pop "platform" as this is an invalid kwarg to napalm Close the NAPALM connection
203
en
0.805035
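The structure of the entries in devices.network_devices is not shown in this file; the dict below is a guess at the minimum NAPALM needs (hostname, username, password) plus the extra 'platform' key that open_napalm_connection() pops off before instantiating the driver. The host and credentials are placeholders.

```python
from napalm import get_network_driver

device = {
    "platform": "ios",         # consumed by get_network_driver(), not by the driver itself
    "hostname": "192.0.2.10",  # placeholder management address
    "username": "admin",
    "password": "secret",
}

platform = device.pop("platform")        # same pop-then-unpack pattern as above
driver = get_network_driver(platform)
conn = driver(**device)
# conn.open(); print(conn.get_facts()); conn.close()   # requires a reachable device
```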
# -*- coding: utf-8 -*- """ Created on Sun Feb 12 11:56:36 2017 Problemset1 - Problem 1 Note: 's' is given by system like s = 'azcbobobegghakl' @author: coskun """ s = 'azcbobobegghakl' # Paste your code into this box nvl=0 for c in s: if c=='a' or c=='e' or c=='i' or c=='o' or c=='u': nvl += 1 print("Number of vowels: " + str(nvl))
anaconda/6.00.1x.PSet1.P1.py
352
Created on Sun Feb 12 11:56:36 2017 Problemset1 - Problem 1 Note: 's' is given by system like s = 'azcbobobegghakl' @author: coskun -*- coding: utf-8 -*- Paste your code into this box
190
en
0.837984
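The same count can also be written as a single generator expression; this is just an equivalent reformulation of the loop above, not part of the original problem set.

```python
s = 'azcbobobegghakl'
nvl = sum(1 for c in s if c in 'aeiou')   # counts the same five vowels
print("Number of vowels: " + str(nvl))
```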
import psycopg2 # Returns connection to the DB def get_sql_connection(): conn = psycopg2.connect(user="cqwhbabxmaxxqd", password="a3063dc5aeec69b41564cd0f1e3c698e0ff9653385f3b87c0f113b70951eb5b3", host="ec2-54-235-92-244.compute-1.amazonaws.com", port="5432", database="d8d34m4nml4iij") return conn
Attendance/context/sql_connection.py
421
Returns connection to the DB
28
en
0.693818
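A minimal usage sketch for get_sql_connection(): open the connection, run a query through a cursor, and close it. The import path is inferred from the file location and the query is a placeholder, since the actual schema is not shown here.

```python
from context.sql_connection import get_sql_connection  # path assumed from Attendance/context/

conn = get_sql_connection()
try:
    with conn.cursor() as cur:       # psycopg2 cursors close themselves when the block exits
        cur.execute("SELECT 1;")     # stand-in for a real query against the attendance schema
        print(cur.fetchone())
finally:
    conn.close()
```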
#################################################################################### # Jiten Dhandha, 2020 # # CFit is a curve fitting tool in python, based on the method of least squares. # # It comes equipped with some standard functions and a graphical user interface. # # # # Inspired by: LSFR.py, Abie Marshall, The University of Manchester, 2016 # #################################################################################### #################################################################################### # LIBRARIES # #################################################################################### import numpy as np import matplotlib matplotlib.use('Qt5Agg') #This requires PyQt5 to be installed. import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import scipy.optimize as opt import scipy.special as sp import scipy.stats as stats import scipy.linalg as linalg import warnings #################################################################################### # LIST OF FUNCTIONS # #################################################################################### #Class to hold all relevant function information class Function(): def __init__(self,name,func,numberOfParameters,rawFuncStr,unicodeFuncStr,rawParametersStr,unicodeParametersStr): self.name = name self.func = func self.numberOfParameters = numberOfParameters self.rawFuncStr = rawFuncStr self.unicodeFuncStr = unicodeFuncStr self.rawParametersStr = rawParametersStr self.unicodeParametersStr = unicodeParametersStr ''' Current supported functions are as follows: Polynomial: constant, linear, quadratic, cubic, quartic, quintic Periodic functions: sine wave, square wave Peak shape functions: gaussian, poisson, laplace, lorentz Polynomial-based functions: power law Exponentials and logarithms: exponential, logarithm ''' #Dictonary to hold the functions functions = { 'Constant': Function(name='Constant', func=lambda x,a: np.polyval([a],x), numberOfParameters=1, rawFuncStr=r"$y = a$", unicodeFuncStr="y = a", rawParametersStr=[r'$a$'], unicodeParametersStr=['a']), 'Linear': Function(name='Linear', func=lambda x,a,b: np.polyval([a,b],x), numberOfParameters=2, rawFuncStr=r"$y = ax+b$", unicodeFuncStr="y = ax+b", rawParametersStr=[r'$a$',r'$b$'], unicodeParametersStr=['a','b']), 'Quadratic': Function(name='Quadratic', func=lambda x,a,b,c: np.polyval([a,b,c],x), numberOfParameters=3, rawFuncStr=r"$y = ax^2+bx+c$", unicodeFuncStr="y = ax\u00B2+bx+c", rawParametersStr=[r'$a$',r'$b$',r'$c$'], unicodeParametersStr=['a','b','c']), 'Cubic': Function(name='Cubic', func=lambda x,a,b,c,d: np.polyval([a,b,c,d],x), numberOfParameters=4, rawFuncStr=r"$y = ax^3+bx^2+cx+d$", unicodeFuncStr="y = ax\u00B3+bx\u00B2+cx+d", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$'], unicodeParametersStr=['a','b','c','d']), 'Quartic': Function(name='Quadratic', func=lambda x,a,b,c,d,e: np.polyval([a,b,c,d,e],x), numberOfParameters=5, rawFuncStr=r"$y = ax^4+bx^3+cx^2+dx+e$", unicodeFuncStr="y = ax\u2074+bx\u00B3+cx\u00B2+dx+e", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$'], unicodeParametersStr=['a','b','c','d','e']), 'Quintic': Function(name='Quintic', func=lambda x,a,b,c,d,e,f: np.polyval([a,b,c,d,e,f],x), numberOfParameters=6, rawFuncStr=r"$y = ax^5+bx^4+cx^3+dx^2+ex+f$", unicodeFuncStr="y = ax\u2075+bx\u2074+cx\u00B3+dx\u00B2+ex+f", rawParametersStr=[r'$a$',r'$b$',r'$c$',r'$d$',r'$e$',r'$f$'], unicodeParametersStr=['a','b','c','d','e','f']), 'Sine wave': Function(name='Sine wave', func=lambda x,y0,A,omg,phi: y0 + 
A*np.sin(omg*x+phi), numberOfParameters=4, rawFuncStr=r"$y = y_0 + A[\sin(\omega x+\phi)]$", unicodeFuncStr="y = y\u2080 + A sin(\u03C9x+\u03D5)", rawParametersStr=[r'$y_0$',r'$A$',r'$\omega$',r'$\phi$'], unicodeParametersStr=['y\u2080','A','\u03C9','\u03D5']), 'Square wave': Function(name='Square wave', func=lambda x,y0,A,omg,phi: y0 + A*np.sign(np.sin(omg*x+phi)), numberOfParameters=4, rawFuncStr=r"$y = y_0 + A\/signum[\sin(\omega x+\phi)]$", unicodeFuncStr="y = y\u2080 + A signum[sin(\u03C9x+\u03D5)]", rawParametersStr=[r'$y_0$',r'$A$',r'$\omega$',r'$\phi$'], unicodeParametersStr=['y\u2080','A','\u03C9','\u03D5']), 'Gaussian': Function(name='Gaussian', func=lambda x,y0,A,mu,sig: y0 + (A/(sig*np.sqrt(2*np.pi)))*np.exp((-1/2)*((x-mu)/sig)**2), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{A}{\sigma \sqrt{2\pi}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}$", unicodeFuncStr="y = y\u2080 + A/[\u03C3 \u221A(2\u03C0)] \u00D7 e^[-(x-\u03BC)\u00B2/(2\u03C3\u00B2)]", rawParametersStr=[r'$y_0$',r'$A$',r'$\mu$',r'$\sigma$'], unicodeParametersStr=['y\u2080','A','\u03BC','\u03C3']), 'Poisson': Function(name='Poisson', func=lambda x,y0,A,lmd: y0 + A*(np.exp(-lmd))*(lmd**x)/sp.gamma(x), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/\frac{e^{-\lambda}\lambda^x}{x!}$", unicodeFuncStr="y = y\u2080 + A [(e^\u03BB)(\u03BB^x)]/x!", rawParametersStr=[r'$y_0$',r'$A$',r'$\lambda$'], unicodeParametersStr=['y\u2080','A','\u03BB']), 'Laplacian': Function(name='Laplacian', func=lambda x,y0,A,mu,b: y0 + (A/(2*b))*np.exp(-np.abs(x-mu)/b), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{A}{2b}e^{-\frac{|x-\mu|}{b}}$", unicodeFuncStr="y = y\u2080 + A/(2b) \u00D7 e^(-|(x-\u03BC)|/b)", rawParametersStr=[r'$y_0$',r'$A$',r'$\mu$',r'$b$'], unicodeParametersStr=['y\u2080','A','\u03BC','b']), 'Lorentzian': Function(name='Lorentzian', func=lambda x,y0,A,x0,omg: y0 + (2*A/np.pi)*(omg/(4*(x-x0)**2+omg**2)), numberOfParameters=4, rawFuncStr=r"$y = y_0 + \frac{2A}{\pi}\frac{\omega}{4(x-x_0)^2+\omega^2}$", unicodeFuncStr="y = y\u2080 + (2A/\u03C0) \u00D7 (\u03C9/[4(x-x\u2080)\u00B2+\u03C9\u00B2])", rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$',r'$\omega$'], unicodeParametersStr=['y\u2080','A','x\u2080','\u03C9']), 'Power': Function(name='Power', func=lambda x,A,b: A*(x)**b, numberOfParameters=2, rawFuncStr=r"$y = Ax^b$", unicodeFuncStr="y = A x\u1D47", rawParametersStr=[r'$A$',r'$b$'], unicodeParametersStr=['A','b']), 'Exponential': Function(name='Exponential', func=lambda x,y0,A,b: y0 + A*np.exp(b*x), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/e^{bx}$", unicodeFuncStr="y = y\u2080 + A e^(bx)", rawParametersStr=[r'$y_0$',r'$A$',r'$b$'], unicodeParametersStr=['y\u2080','A','b']), 'Logarithm': Function(name='Logarithm', func=lambda x,y0,A,x0: y0 + A*np.log(x-x0), numberOfParameters=3, rawFuncStr=r"$y = y_0 + A\/log(x-x_0)$", unicodeFuncStr="y = y\u2080 + A log(x-x\u2080)", rawParametersStr=[r'$y_0$',r'$A$',r'$x_0$'], unicodeParametersStr=['y\u2080','A','x\u2080']) } #################################################################################### # GLOBAL VARIABLES # #################################################################################### #DATA RELATED VARIABLES data = [] #holds the data from data file x = [] #holds the x values from the data file y = [] #holds the y values from the data file y_err = [] #holds the y errors, either from user file or generated ERR = bool #boolean to check if data file contains errors numberOfDataPoints = int #holds the number of points in the data file #FIT FUNCTION RELATED VARIABLES 
function = '' #string holding the function to fit to numberOfParameters = int #holds the number of parameters of the fitting function #FITTING VARIABLES fitStructure = [] #holds the fitting information from curve_fit/polyfit fitParameters = [] #holds the fitting parameters fitErrors = [] #holds the errors on the fitting parameters chiSquared = float #holds the final chi-squared value of the fit redChiSquared = float #holds the final reduced chi-squared value of the fit redChiSquaredLimits = [] #holds the "acceptable range" of reduced chi-squared #################################################################################### # READING USER FILE # #################################################################################### ''' This function tries to read the file held at fileLocation and sets the global variables that hold all the information about the data set. @Arguments: fileLocation - string containing the location of the file chosen by user. @Return value: Returns an integer that specifies success (0) or failure (non 0) of the function. ''' def readFile(fileLocation): #Access to global variables global data global x global y global y_err global ERR global numberOfDataPoints #Checking if the file string is empty if(fileLocation == ''): return 1 #Checking if the file is a .txt or .csv file if(not fileLocation.endswith('.txt') and not fileLocation.endswith('.csv') ): return 2 #Trying to populate the data array from the file (allows both spaces and commas) try: with open(fileLocation, 'r') as file: clean_lines = [' '.join(line.split()) for line in file] for delims in [(' ,',','),(', ',','),(' ',',')]: clean_lines = [line.replace(*delims) for line in clean_lines] data = np.genfromtxt(clean_lines, delimiter=',',dtype='float_') except (TypeError, ValueError, AttributeError): return 3 #Checking if the data array has 2 or 3 columns try: if(not len(data[0])==2 and not len(data[0])==3): return 4 except TypeError: return 4 #Checking if there are any NaN's or Inf's in the data if(np.any(np.isnan(data)) or np.any(np.isinf(data))): return 5 #Checking if the errors are all positive number if(len(data[0])==3 and np.any(data[:,2]<=0)): return 6 #Setting global variables numberOfDataPoints = len(data) data = data[data[:,0].argsort()] #Sorting the array in ascending order along x column x = data[:,0] y = data[:,1] #Checking if error along y axis has been provided if(len(data[0])==2): y_err = np.array([1 for i in data]) #Constant error to aid in best chi-squared estimate ERR = False elif(len(data[0])==3): y_err = data[:,2] ERR = True #All ran correctly! return 0 #################################################################################### # FIT - RELATED FUNCTIONS # #################################################################################### ''' This function calculates the chi-squared against the data set given specific values of fitting function parameters. @Arguments: params - array containing parameters of the fitting function to calculate chi-squared against @Return value: Returns chi-squared as a float. ''' def calcChiSquared(params): #Access to global variables global function global x global y global y_err #Returning chi-squared value for the given fitting function parameters return np.sum( ((y-functions[function].func(x,*params))/y_err)**2 ) ''' This function calculates the final chi-squared as well as reduced chi-squared of the fit. It also calculates the acceptable range of reduced chi-squared based on the chi-squared statistic. 
@Arguments: -- @Return value: -- ''' def calcGoodnessOfFit(): #Access to global variables global numberOfDataPoints global numberOfParameters global fitParameters global chiSquared global redChiSquared global redChiSquaredLimits #Calculating degrees of freedom degreesOfFreedom = numberOfDataPoints - numberOfParameters #Calculating chi-squared and reduced chi-squared chiSquared = calcChiSquared(fitParameters) redChiSquared = chiSquared/degreesOfFreedom #Calculating the "acceptable" range of reduced chi-squared pValues = [0.95,0.05] redChiSquaredLimits = stats.chi2.isf(pValues,degreesOfFreedom)/degreesOfFreedom ''' This function provides an initial guess for the final fitting to take place in fitFunction(). It comes into play when the user wants to fit the data automatically. The initial guess is based on a two step procedure. It involves looking at the data: 1) and figuring out a single-valued "guess" 2) or figuring out bounds on the parameters and obtaining a guess from that by global minimization of chi-squared using the scipy differential evolution algorithm. @Arguments: -- @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. ''' def guessParameters(): #Access to global variables global function global x global y global numberOfDataPoints #Useful quantities for parameter estimation xmin = min(x) xmax = max(x) ymin = min(y) ymax = max(y) #Empty array to store "initial guess" iniParameters = [] #All the parameter estimation happens here if(function in ['Constant','Linear','Quadratic','Cubic','Quartic','Quintic']): order = numberOfParameters - 1 iniParameters = np.polyfit(x,y,deg=order,w=1/y_err) elif(function=='Sine wave'): x_range = xmax - xmin y_range = ymax - ymin y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range)) A_bound = (abs(y_range)/3,2*abs(y_range)/3) phi_bound = (0,2*np.pi) y_avg = np.average(y) y_std = np.std(y) yscaled = [] for i in y: if(i>y_avg+y_std): yscaled.append(1) elif(i<y_avg-y_std): yscaled.append(-1) else: yscaled.append(0) flag = yscaled[0] crossings = 0 for i in yscaled: if(i==0): continue if(flag==0): flag=i elif(i==-flag): flag = -flag crossings+=1 crossings = crossings/2 guess_f = crossings/x_range omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f) BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Square wave'): x_range = xmax - xmin y_range = ymax - ymin y0_bound = (ymin+2/5*abs(y_range),ymax-2/5*abs(y_range)) A_bound = (abs(y_range)/3,2*abs(y_range)/3) phi_bound = (0,2*np.pi) y_avg = np.average(y) y_std = np.std(y) yscaled = [] for i in y: if(i>y_avg+y_std): yscaled.append(1) elif(i<y_avg-y_std): yscaled.append(-1) else: yscaled.append(0) flag = yscaled[0] crossings = 0 for i in yscaled: if(i==0): continue if(flag==0): flag=i elif(i==-flag): flag = -flag crossings+=1 crossings = crossings/2 guess_f = crossings/x_range omg_bound = (0.5*(2*np.pi)*guess_f,2*(2*np.pi)*guess_f) BOUNDS = [y0_bound, A_bound, omg_bound, phi_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Gaussian'): x_range = xmax - xmin y_range = ymax - ymin mu_bound = (xmin-x_range,xmax+x_range) omg_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*2.5*x_range) 
y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, mu_bound, omg_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2.5*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, mu_bound, omg_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Poisson'): x_range = xmax - xmin y_range = ymax - ymin lmd_bound = (max(0,xmin-x_range),xmax+x_range) A_bound1 = (0,2*abs(y_range)) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, lmd_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (0,-2*abs(y_range)) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound1, A_bound1, lmd_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Laplacian'): x_range = xmax - xmin y_range = ymax - ymin mu_bound = (xmin-x_range,xmax+x_range) b_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*2*x_range) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, mu_bound, b_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*2*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, mu_bound, b_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Lorentzian'): x_range = xmax - xmin y_range = ymax - ymin x0_bound = (xmin-x_range,xmax+x_range) omg_bound = (0,x_range) A_bound1 = (abs(y_range)/3,2*abs(y_range)*np.pi/2*x_range) y0_bound1 = (ymin-y_range,ymin+y_range/2) BOUNDS1 = [y0_bound1, A_bound1, x0_bound, omg_bound] BOUNDS1 = [np.sort(bound) for bound in BOUNDS1] A_bound2 = (-abs(y_range)/3,-2*abs(y_range)*np.pi/2*x_range) y0_bound2 = (ymax-y_range/2,ymax+y_range) BOUNDS2 = [y0_bound2, A_bound2, x0_bound, omg_bound] BOUNDS2 = [np.sort(bound) for bound in BOUNDS2] BOUNDS_LIST = [BOUNDS1,BOUNDS2] with warnings.catch_warnings(): warnings.filterwarnings('ignore') bestChiSquared = np.inf for BOUNDS in BOUNDS_LIST: tempParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x tempChiSquared = calcChiSquared(tempParameters) if(tempChiSquared < bestChiSquared): bestChiSquared = tempChiSquared iniParameters = tempParameters elif(function=='Power'): lX = np.log(abs(x), where=x>0) lY = np.log(abs(y), where=x>0) with np.errstate(invalid='ignore'): b_est, logA_est = np.polyfit(lX,lY,w=np.exp(lX),deg=1) 
A_est = np.exp(logA_est) A_bound = (-A_est,A_est) b_bound = (b_est-0.5*abs(b_est),b_est+0.5*abs(b_est)) BOUNDS = [A_bound,b_bound] BOUNDS = [np.sort(bound) for bound in BOUNDS] with warnings.catch_warnings(): warnings.filterwarnings('ignore') iniParameters = opt.differential_evolution(calcChiSquared,bounds=BOUNDS,seed=0).x elif(function=='Exponential'): #Inspired by https://github.com/scipy/scipy/pull/9158 s = np.empty_like(y) s[0] = 0 s[1:] = np.cumsum(0.5 * (y[1:] + y[:-1]) * np.diff(x)) xn = np.array(x - x[0]) yn = np.array(y - y[0]) sx2 = np.sum(xn**2) sxs = np.sum(xn*s) sys = np.sum(yn*s) ss2 = np.sum(s**2) sxy = np.sum(xn*yn) _, [b] = linalg.inv([[sx2, sxs], [sxs, ss2]]).dot([[sxy], [sys]]) ex = np.exp(b * x) se1 = np.sum(ex) se2 = np.sum(ex**2) sy0 = np.sum(y) sye = np.sum((y * ex)) [y0], [A] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sy0], [sye]]) iniParameters = [y0,A,b] elif(function=='Logarithm'): #Inspired by https://github.com/scipy/scipy/pull/9158 s = np.empty_like(x) s[0] = 0 s[1:] = np.cumsum(0.5 * (x[1:] + x[:-1]) * np.diff(y)) xn = np.array(x - x[0]) yn = np.array(y - y[0]) sy2 = np.sum(yn**2) sys = np.sum(yn*s) sxs = np.sum(xn*s) ss2 = np.sum(s**2) syx = np.sum(xn*yn) _, [t1] = linalg.inv([[sy2, sys], [sys, ss2]]).dot([[syx], [sxs]]) A = 1/t1 ey = np.exp(t1 * y) se1 = np.sum(ey) se2 = np.sum(ey**2) sx0 = np.sum(x) sxe = np.sum((x * ey)) [x0], [t2] = linalg.inv([[x.size, se1], [se1, se2]]).dot([[sx0], [sxe]]) with warnings.catch_warnings(): warnings.filterwarnings('ignore') y0 = -A*np.log(t2) iniParameters = [y0,A,x0] #If there is no initial guess if(iniParameters==[]): return 1 #Sending the "best guess" parameters to the final fitting algorithm return fitFunction(iniParameters) ''' This function converts a string containing the guess parameters for fitting provided by the user into an array of floats for the fitFunction() to use. It comes into play when the user wants to fit the data manually. @Arguments: iniParametersString - string containing initial parameters @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. ''' def manualParameters(iniParametersString): #Access to global variables global numberOfParameters #Splitting string delimited by commas splitString = iniParametersString.split(',') #Trying to populate the iniParameters array iniParameters = [] try: for i in splitString: #Further splitting each sub-string into "right and left" of the "=" sign temp1 = i.split('=') #Taking the value on the right side and converting to float temp2 = float(temp1[1]) #Adding it to the iniParameters array iniParameters.append(temp2) except (ValueError,IndexError): return 3 #Checking if the number of parameters expected and received match if(len(iniParameters)!=numberOfParameters): return 3 #Sending the "manually input" parameters to the final fitting algorithm return fitFunction(iniParameters) ''' This function does the final fitting of the data. It takes an inital guess on the parameters and optimizes from there. @Arguments: iniParameters - array containing initial guess @Return value: Returns an integer denoting success (0) or failure (non 0) of the function. 
''' def fitFunction(iniParameters): #Access to global variables global function global x global y global y_err global numberOfDataPoints global numberOfParameters global fitStructure global fitParameters global fitErrors if(numberOfDataPoints<numberOfParameters): return 2 #Doing the final fitting of the data try: #Ignoring runtime warnings (in case the optimization passes through invalid values) with warnings.catch_warnings(): warnings.filterwarnings('ignore') #Main optimization happens here #Note: curve_fit populates sigma with 1's as a default. #absolute_sigma = True is the flag that forces errors to not be used in a relative manner fitStructure = opt.curve_fit(functions[function].func,x,y,absolute_sigma=True,p0=iniParameters,sigma=y_err) #Catching errors except RuntimeError as e: #Optimization failed if (str(e).startswith('Optimal parameters not found: Number of calls to function has reached maxfev')): return 1 #Something else went wrong else: raise #Filling in the fit parameters and errors on them (from the covariance matrix) fitParameters = fitStructure[0] fitErrors = np.sqrt(np.diag(fitStructure[1])) #Quantizing the goodness of fit calcGoodnessOfFit() #All ran correctly! return 0 #################################################################################### # PLOTTING FUNCTIONS # #################################################################################### ''' This function plots the raw data (without the fit). @Arguments: plotTitle - string holding the title of the plot xTitle - string holding the label for the x axis yTitle - string holding the label for the y axis viewGrid - boolean denoting whether the user wants the plot to have gridlines @Return value: -- ''' def plotRawData(plotTitle,xTitle,yTitle,viewGrid): #Access to global variables global x global y global y_err global ERR #Creating figure and adding subplot figure1 = plt.figure() axes1 = figure1.add_subplot(111) #Setting x and y axis labels axes1.set_title(plotTitle, fontsize='x-large') axes1.set_xlabel(xTitle, fontsize='large') axes1.set_ylabel(yTitle, fontsize='large') #Checking if user wants to add grid to plot and adding them if(viewGrid): axes1.minorticks_on() axes1.set_axisbelow(True) axes1.grid(b=True, which='major', alpha=0.5) axes1.grid(b=True, which='minor', alpha=0.2) #Plotting the raw data if(ERR): axes1.errorbar(x,y,y_err,fmt='.',color='midnightblue',ecolor='royalblue',capsize=2) else: axes1.scatter(x,y,color='midnightblue', label='Data') #Displaying the beauty figure1.show() ''' This function plots the raw data along with the fitting function and shows the fitting parameters if the user wants to see it. 
@Arguments: plotTitle - string holding the title of the plot xTitle - string holding the label for the x axis yTitle - string holding the label for the y axis viewGrid - boolean holding whether the user wants the plot to have gridlines viewParameters - boolean holding whether the user wants to see the fitting parameters viewResiduals - boolean holding whether the user wants to see the residuals plot @Return value: -- ''' def plotFitData(plotTitle,xTitle,yTitle,viewGrid,viewParameters,viewResiduals): #Access to global variables global x global y global y_err global ERR global numberOfDataPoints global function global numberOfParameters global fitParameters global fitErrors global chiSquared global redChiSquared global redChiSquaredLimits #Creating figure and adding subplots figure2 = plt.figure() if(viewResiduals and viewParameters): gs = gridspec.GridSpec(2, 2, height_ratios=[3, 1], width_ratios=[4,1]) axes2 = figure2.add_subplot(gs[0,0]) axes3 = figure2.add_subplot(gs[1,0]) axes4 = figure2.add_subplot(gs[0,1]) elif(viewResiduals and not viewParameters): gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) axes2 = figure2.add_subplot(gs[0]) axes3 = figure2.add_subplot(gs[1]) elif(not viewResiduals and viewParameters): gs = gridspec.GridSpec(1, 2, width_ratios=[4, 1]) axes2 = figure2.add_subplot(gs[0]) axes4 = figure2.add_subplot(gs[1]) else: axes2 = figure2.add_subplot(111) #Setting axes titles axes2.set_title(plotTitle, fontsize='x-large') axes2.set_xlabel(xTitle, fontsize='large') axes2.set_ylabel(yTitle, fontsize='large') #Checking if user wants to add grid to plot and adding them if(viewGrid): axes2.minorticks_on() axes2.set_axisbelow(True) axes2.grid(b=True, which='major', alpha=0.5) axes2.grid(b=True, which='minor', alpha=0.2) if(viewResiduals): axes3.minorticks_on() axes3.set_axisbelow(True) axes3.grid(b=True, which='major', alpha=0.5) axes3.grid(b=True, which='minor', alpha=0.2) #Plotting the raw data if(ERR): axes2.errorbar(x,y,y_err, fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1, label='Data') else: axes2.scatter(x,y,color='midnightblue', label='Data') #Plotting the best fit xx = np.linspace(min(x),max(x),1000) yy = functions[function].func(xx,*fitParameters) axes2.plot(xx,yy,color='darkorange', zorder=2, label='Fit function') #Plotting the residuals if(viewResiduals): residuals = functions[function].func(x,*fitParameters) - y axes3.axhline(0,color='darkorange', zorder=2) if(ERR==True): axes3.errorbar(x,residuals,y_err,fmt='.', color='midnightblue', ecolor='royalblue', capsize=2, zorder=1) else: axes3.scatter(x,residuals,color='midnightblue') #Adding legend to the plot axes2.legend(markerscale=2, fontsize='large') #Displaying fit parameters if the user wants if(viewParameters): #Removing x and y axis axes4.set_axis_off() #Declaring the string array that holds everything displayed in the parameters box parametersStr = [] #Adding function type to parameters box parametersStr.append(r"$\bf{Function:}$") parametersStr.append(functions[function].name) parametersStr.append(functions[function].rawFuncStr) #Adding fit parameters to the parameters box parametersStr.append("") parametersStr.append(r"$\bf{Fitting\/parameters:}$") for i in range(numberOfParameters): parametersStr.append(functions[function].rawParametersStr[i]+r' = {0:.5e} $\pm$ {1:.5e}'.format(fitParameters[i],fitErrors[i])) #Adding some additional fitting details to the parameters box parametersStr.append("") parametersStr.append(r"$\bf{Other\/fitting\/data:}$") parametersStr.append(r'Number of data 
points = {0}'.format(numberOfDataPoints)) parametersStr.append(r'Number of parameters = {0}'.format(numberOfParameters)) parametersStr.append(r'$\chi^2$ = {0:.5e}'.format(chiSquared)) parametersStr.append(r'$\chi_r^2$ = {0:.5e}'.format(redChiSquared)) parametersStr.append(r'Acceptable range of $\chi_r^2$ = ({0:.2f},{1:.2f})'.format(redChiSquaredLimits[0],redChiSquaredLimits[1])) #Adding an important note if(not ERR): parametersStr.append("") parametersStr.append(r'$\bf{Note}$: Errors and chi-squared estimates') parametersStr.append(r'here dont mean much since no errors') parametersStr.append(r'along y-axis are present!') #Joining all elements of the string array into a single string separated by \n's parametersStr = '\n'.join(parametersStr) #Placing the parameters box in the plot axes4.text(-0.35,1.0,parametersStr, bbox=dict(boxstyle="square", fc="lemonchiffon", ec="darkorange", pad=0.5), va='top', ha='left', fontsize='large', linespacing=1.3) #Displaying the beauty figure2.show() ''' APPENDIX: Check efficiency of differential evolution against other global minimization techniques: iniParameters = opt.brute(calcChiSquared,ranges=[],finish=opt.fmin) iniParameters = opt.basinhopping(calcChiSquared,x0=[]) '''
Fitting.py
36,425
Jiten Dhandha, 2020 CFit is a curve fitting tool in python, based on the method of least squares. It comes equipped with some standard functions and a graphical user interface. Inspired by: LSFR.py, Abie Marshall, The University of Manchester, 2016 LIBRARIES This requires PyQt5 to be installed. LIST OF FUNCTIONS Class to hold all relevant function informationDictonary to hold the functions GLOBAL VARIABLES DATA RELATED VARIABLESholds the data from data fileholds the x values from the data fileholds the y values from the data fileholds the y errors, either from user file or generatedboolean to check if data file contains errorsholds the number of points in the data fileFIT FUNCTION RELATED VARIABLESstring holding the function to fit toholds the number of parameters of the fitting functionFITTING VARIABLESholds the fitting information from curve_fit/polyfitholds the fitting parametersholds the errors on the fitting parametersholds the final chi-squared value of the fitholds the final reduced chi-squared value of the fitholds the "acceptable range" of reduced chi-squared READING USER FILE Access to global variablesChecking if the file string is emptyChecking if the file is a .txt or .csv fileTrying to populate the data array from the file (allows both spaces and commas)Checking if the data array has 2 or 3 columnsChecking if there are any NaN's or Inf's in the dataChecking if the errors are all positive numberSetting global variablesSorting the array in ascending order along x columnChecking if error along y axis has been providedConstant error to aid in best chi-squared estimateAll ran correctly! FIT - RELATED FUNCTIONS Access to global variablesReturning chi-squared value for the given fitting function parametersAccess to global variablesCalculating degrees of freedomCalculating chi-squared and reduced chi-squaredCalculating the "acceptable" range of reduced chi-squaredAccess to global variablesUseful quantities for parameter estimationEmpty array to store "initial guess"All the parameter estimation happens hereInspired by https://github.com/scipy/scipy/pull/9158Inspired by https://github.com/scipy/scipy/pull/9158If there is no initial guessSending the "best guess" parameters to the final fitting algorithmAccess to global variablesSplitting string delimited by commasTrying to populate the iniParameters arrayFurther splitting each sub-string into "right and left" of the "=" signTaking the value on the right side and converting to floatAdding it to the iniParameters arrayChecking if the number of parameters expected and received matchSending the "manually input" parameters to the final fitting algorithm Access to global variablesDoing the final fitting of the dataIgnoring runtime warnings (in case the optimization passes through invalid values) Main optimization happens hereNote: curve_fit populates sigma with 1's as a default.absolute_sigma = True is the flag that forces errors to not be used in a relative mannerCatching errorsOptimization failedSomething else went wrongFilling in the fit parameters and errors on them (from the covariance matrix)Quantizing the goodness of fitAll ran correctly! 
PLOTTING FUNCTIONS Access to global variablesCreating figure and adding subplotSetting x and y axis labelsChecking if user wants to add grid to plot and adding themPlotting the raw dataDisplaying the beautyAccess to global variablesCreating figure and adding subplotsSetting axes titlesChecking if user wants to add grid to plot and adding themPlotting the raw dataPlotting the best fitPlotting the residualsAdding legend to the plotDisplaying fit parameters if the user wantsRemoving x and y axisDeclaring the string array that holds everything displayed in the parameters boxAdding function type to parameters boxAdding fit parameters to the parameters boxAdding some additional fitting details to the parameters boxAdding an important noteJoining all elements of the string array into a single string separated by \n's Placing the parameters box in the plotDisplaying the beauty
4,576
en
0.703767
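As a compact illustration of the goodness-of-fit numbers that calcGoodnessOfFit() reports, the snippet below fits a straight line to synthetic data with the same curve_fit settings used in fitFunction() (explicit y errors, absolute_sigma=True) and then computes chi-squared, reduced chi-squared and the "acceptable range" from the chi-squared statistic with the same p-values (0.95, 0.05). The data are generated on the spot rather than read from a file.

```python
import numpy as np
import scipy.optimize as opt
import scipy.stats as stats

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 40)
y_err = np.full_like(x, 0.3)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.3, x.size)       # noisy straight line

def linear(x, a, b):
    return a * x + b

pars, cov = opt.curve_fit(linear, x, y, sigma=y_err, absolute_sigma=True)
errs = np.sqrt(np.diag(cov))                           # 1-sigma parameter errors

dof = x.size - len(pars)                               # degrees of freedom
chi2 = np.sum(((y - linear(x, *pars)) / y_err) ** 2)
red_chi2 = chi2 / dof
limits = stats.chi2.isf([0.95, 0.05], dof) / dof       # acceptable range of reduced chi-squared

print('a = %.3f +/- %.3f, b = %.3f +/- %.3f' % (pars[0], errs[0], pars[1], errs[1]))
print('reduced chi-squared = %.3f, acceptable range = (%.2f, %.2f)'
      % (red_chi2, limits[0], limits[1]))
```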
# -*- coding: utf-8 -*- # @Author: jankincai # @Date: 2021-01-26 23:18:43 # @Last Modified by: jankincai # @Last Modified time: 2021-01-26 23:27:01 class LibpcapError(Exception): """Exception raised for errors in libpcap.""" def __init__(self, message): """Store the error message.""" self.message = message def __str__(self): """Return the error message.""" return self.message
pylibpcap/exception.py
406
Exception raised for errors in the libpcap. init -*- coding: utf-8 -*- @Author: jankincai @Date: 2021-01-26 23:18:43 @Last Modified by: jankincai @Last Modified time: 2021-01-26 23:27:01
206
en
0.604112
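Trivial usage sketch: the exception only carries a message string, so raising and printing it looks like this (the import path is inferred from the file location).

```python
from pylibpcap.exception import LibpcapError  # path assumed from pylibpcap/exception.py

try:
    raise LibpcapError("pcap handle could not be opened")
except LibpcapError as err:
    print(err)            # __str__ returns the stored message
```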
import os import numpy as np import matplotlib.pyplot as plt # import sys, os # sys.path.append(os.path.join(os.path.dirname(__file__), 'utils')) import process_data import common def plot_gt(Y_origin_data, pose_folder, preprocessed_folder, data_seqs, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"): start_idx = 0 end_idx = 0 additional_row = np.array([0, 0, 0, 1], dtype=np.float64) for seq in data_seqs: end_idx += seq_sizes[seq] origin_poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64) for idx, row in enumerate(Y_origin_data[start_idx:end_idx]): new_pose = np.array(list(map(float, row.strip().split(" "))), dtype=np.float64) new_pose = np.concatenate((new_pose, additional_row)) new_pose = new_pose.reshape(4,4) origin_poses[idx] = new_pose fig = plt.figure(figsize=(10,10)) if dim == "2d": plt.scatter(origin_poses[:,0,3],origin_poses[:,1,3], c=origin_poses[:,2,3], s=20, alpha=0.5) else: # 3d ax = fig.add_subplot(111, projection='3d') ax.scatter(origin_poses[:,0,3],origin_poses[:,1,3],origin_poses[:,2,3],c=origin_poses[:,1,3], s=20, alpha=0.5) if save_graph: graph_folder = os.path.join('result', dataset, 'graph') os.makedirs(graph_folder, exist_ok=True) plt.savefig(os.path.join(graph_folder, f"gt_{seq}_{dim}.png")) # plt.close(fig) start_idx += seq_sizes[seq] def plot_results(Y_origin_data, Y_estimated_data, data_seqs, rnn_size, seq_sizes, dim="2d", save_graph=True, dataset="KITTI"): start_idx = 0 end_idx = 0 additional_row = np.array([0, 0, 0, 1], dtype=np.float64) for i, seq in enumerate(data_seqs): end_idx += seq_sizes[seq] poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64) for idx in range(rnn_size): current_pose = np.array(list(map(float, Y_origin_data[start_idx+idx].strip().split(" "))), dtype=np.float64) current_pose = np.concatenate((current_pose, additional_row)) current_pose = current_pose.reshape(4,4) poses[idx] = current_pose for idx, relative_pose in enumerate(Y_estimated_data[start_idx-i*rnn_size:end_idx-(i+1)*rnn_size]): rot_mat = common.euler_to_rot_mat(relative_pose[5],relative_pose[4],relative_pose[3]) trans_mat = np.identity(4) trans_mat[:3,:3]=rot_mat trans_mat[0,3]=relative_pose[0] trans_mat[1,3]=relative_pose[1] trans_mat[2,3]=relative_pose[2] current_pose = np.dot(current_pose, trans_mat) poses[idx + rnn_size] = current_pose fig = plt.figure(figsize=(10,10)) if dim == "2d": plt.scatter(poses[:,0,3],poses[:,1,3], c=poses[:,2,3], s=20, alpha=0.5) else: # 3d ax = fig.add_subplot(111, projection='3d') ax.scatter(poses[:,0,3],poses[:,1,3],poses[:,2,3],c=poses[:,1,3], s=20, alpha=0.5) if save_graph: graph_folder = os.path.join('result', dataset, 'graph') os.makedirs(graph_folder, exist_ok=True) plt.savefig(os.path.join(graph_folder, f"est_{seq}_{dim}.png")) # plt.close(fig) start_idx += seq_sizes[seq]
utils/plot.py
3,384
import sys, os sys.path.append(os.path.join(os.path.dirname(__file__), 'utils')) 3d plt.close(fig) 3d plt.close(fig)
116
fa
0.120761
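A hedged sketch of how plot_gt from utils/plot.py might be called. The inputs are made up (identity poses as space-separated 3x4 rows); pose_folder and preprocessed_folder are not used inside the function body, so None is passed for them here as an assumption:

import numpy as np
from utils import plot

# ten flattened 3x4 identity poses, split across two hypothetical sequences
Y_origin_data = np.array(["1 0 0 0 0 1 0 0 0 0 1 0"] * 10)
seq_sizes = {"00": 6, "01": 4}

plot.plot_gt(Y_origin_data, pose_folder=None, preprocessed_folder=None,
             data_seqs=["00", "01"], seq_sizes=seq_sizes, dim="2d",
             save_graph=True, dataset="KITTI")
# expected output: result/KITTI/graph/gt_00_2d.png and gt_01_2d.png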
""" Module for handling the custom Lua commands for the bot """ import lupa import shlex import time from threading import Thread from .utils import human_readable_time, ArgumentParser from .http import Http, TupleData from .timer import Interval, Delayed from .chat import Chat class CommandPermissionError(BaseException): """ An exception that happens when a user tries to execute a custom command without the appropriate user level for it. """ pass class CommandCooldownError(BaseException): """ An exception that happens when a user tries to execute a custom command during it's cooldown period. """ pass class DataSource(object): """ A simple structure to allow Lua to store and read data from the database Call from Lua via the injected _G["datastore"] instance: .. code-block:: lua _G["datastore"].set("my-data", "my-value") _G["datastore"].get("my-data") If not working directly with the datasource implementation you should however use the datasource wrapper: .. code-block:: lua local ds = require('datasource') ds.set("my-data", "my-value") ds.get("my-data") """ def __init__(self, channel, bot, data=None): if not data: data = {} self.channel = channel self.bot = bot self.data = data def get(self, key): """ Get a single value from the database :param key: The name of the value :return: The stored value or "null" if not found """ if not key in self.data: # TODO: Check how to work around this silly shit return "null" return self.data[key] def set(self, key, value): """ Set a single value to the database :param key: The name of the value :param value: The value to be stored :return: None """ self.data[key] = value self.bot.update_global_value(self.channel, key, value) class CommandManager(object): """ Manager for custom commands """ # Template for creating new Lua functions func_template = u""" function __chat__{func_name}({args}) {func_body} end """ # Function template for doing Lua function calls # {{ and }} are escaped { and } for .format() call_template = u""" function(...) 
local chat = require("chat") local retval = __chat__{func_name}(unpack(table.pack(...))) if retval ~= nil then chat.message(retval) end return retval end """ def __init__(self, channel, bot, settings=None, data=None, logger=None, chat=None): self.channel = channel self.bot = bot if chat: self.chat = chat else: self.chat = Chat(self.bot, self.channel) self.settings = settings self.logger = logger self.commands = {} self.timers = [] self.datasource = DataSource(channel, bot, data) self.commands_last_executed = {} self.lua = lupa.LuaRuntime(unpack_returned_tuples=False) self._inject_globals() def stop_timers(self): """ Cancel all timers still running :return: """ for timer in self.timers: timer.cancel() def add_command(self, args): """ Handler for the "def" -commands in chat :param args: All the words after the "def" -command :return: The created command, if any, and the minimum required user level """ added, command, flags, user_level, code = self._parse_func(args) channel, command, flags, user_level, code = self.load_command( command, flags, user_level, code ) return added, channel, command, flags, user_level, code def add_simple_command(self, args): """ Handler for the "com" -commands in chat :param args: All the words after the "com" -command :return: A bunch of stuff """ added, command, flags, user_level, code = self._parse_simple_func(args) channel, command, flags, user_level, code = self.load_command( command, flags, user_level, code ) return added, channel, command, flags, user_level, code def is_valid_command(self, command): """ Check if the given command is registered :param command: The name of the command :return: True or False """ return command in self.commands def load_command(self, command, flags, user_level, code, set=True): """ Load a command in the runtime :param command: What is the command called :param flags: Command flags :param user_level: The minimum user level to run the command :param code: The Lua code for the custom command :param set: Should the command be set on the bot via set_command, set this to False when loading commands from e.g. 
the database :return: None """ if self.logger: self.logger.debug(u"Loading command {0} with user level " u"{1}".format( command, user_level )) self.commands[command] = { "flags": flags, "user_level": user_level, "code": code } self.load_lua(code) return self.channel, command, flags, user_level, code def run_command(self, nick, user_level, command, args=None, timestamp=None, threaded=True): """ Handles running of custom commands from chat :param nick: The calling user :param user_level: The calling user's level :param command: The command triggered :param args: The words on the line after the command :param timestamp: The unixtime for when the event happened :return: Any return value from the custom Lua command, to be sent back to the channel :raise CommandPermissionError: If user lacks permissions for command """ if not self._can_run_command(user_level, command): raise CommandPermissionError(u"User does not have permission to " u"run this command") if args is None: args = [] else: if "quoted" in self.commands[command]["flags"]: if self.commands[command]["flags"]["quoted"] == 1: text = " ".join(args) args = shlex.split(text) if timestamp is None: timestamp = time.time() if self._is_under_cooldown(command, timestamp): raise CommandCooldownError() self._set_last_executed_time(command, timestamp) def run(): code = self.call_template.format(func_name=command) lua_func = self.lua.eval(code) if "want_user" in self.commands[command]["flags"]: if self.commands[command]["flags"]["want_user"] == 1: args.insert(0, nick) return lua_func(*args) if threaded: lua_thread = Thread(target=run) lua_thread.daemon = True lua_thread.start() else: return run() def load_lua(self, code): """ Load Lua code in our runtime :param code: The Lua code :return: None """ self.lua.execute(code) def _parse_func(self, args): """ Process the given arguments into a function definition :param args: List of the words after the "def" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args """ parser = ArgumentParser() parser.add_argument("-ul", "--user_level", default="mod") parser.add_argument("-c", "--cooldown", default=None) parser.add_argument("-a", "--args", default="") parser.add_argument("-w", "--want_user", action="store_true", default=False) parser.add_argument("-q", "--quoted", action="store_true", default=False) parser.add_argument("func_name") parser.add_argument("func_body", nargs='*') options = parser.parse_args(args) # Rebuild code if options.want_user: new_args = "user" if len(options.args) > 0: new_args += "," options.args = new_args + options.args code = self.func_template.format( func_name=options.func_name, args=options.args, func_body=" ".join(options.func_body) ) flags = { "want_user": int(options.want_user), "quoted": int(options.quoted), "cooldown": (int(options.cooldown) if options.cooldown else None) } added = bool(options.func_body) return added, options.func_name, flags, options.user_level, code def _parse_simple_func(self, args): """ Process the given arguments into a simple function definition :param args: List of the words after the "com" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args """ parser = ArgumentParser() parser.add_argument("-ul", "--user_level", default="mod") parser.add_argument("-c", "--cooldown", default=None) 
parser.add_argument("func_name") parser.add_argument("response_text", nargs='*') options = parser.parse_args(args) # Rebuild response response_text = " ".join(options.response_text) response_text = response_text.replace("\\", "\\\\") response_text = response_text.replace('"', '\\"') func_body = u""" return SimpleCom("{response_text}", user, table.pack(...)) """.format(response_text=response_text) code = self.func_template.format( func_name=options.func_name, args="user,...", func_body=func_body ) flags = { "want_user": 1, "quoted": 0, "cooldown": (int(options.cooldown) if options.cooldown else None) } added = bool(options.response_text) return added, options.func_name, flags, options.user_level, code def _is_under_cooldown(self, command, timestamp): """ Check if this command's cooldown period is in effect :param command: Which command :param timestamp: What is the timestamp it was issued on :return: """ if command in self.commands_last_executed: if "cooldown" in self.commands[command]["flags"]: cooldown_period = self.commands[command]["flags"]["cooldown"] last_executed = self.commands_last_executed[command] if cooldown_period is not None: cooldown_expires = last_executed + cooldown_period if timestamp < cooldown_expires: return True return False def _set_last_executed_time(self, command, timestamp): """ Save the last execution time of a command :param command: Which command :param timestamp: What is the timestamp it was issued on :return: """ self.commands_last_executed[command] = timestamp def _level_name_to_number(self, name): """ Convert the given user level to a number :param name: Level name :return: A number between 0 and Infinity, higher number is higher user level :raise ValueError: In case of invalid user level """ levels = [ "user", "reg", "mod", "owner" ] if not name in levels: raise ValueError(u"{0} is not a valid user level".format(name)) return levels.index(name) def _can_run_command(self, user_level, command): """ Check if this command can be run with the given user level :param user_level: The calling user's level :param command: The command being called :return: True of False """ need_level = self._level_name_to_number( self.commands[command]["user_level"] ) got_level = self._level_name_to_number(user_level) return got_level >= need_level def _inject_globals(self): """ Inject some Python objects and functions into the Lua global scope _G :return: None """ injector = self.lua.eval(""" function (key, value) _G[key] = value end """) def log(message): """ Pass a message from Lua to the Python logger :param message: The message text :return: None """ self.logger.debug(u"Lua: " + str(message)) def interval(seconds, function): i = Interval(seconds, function, self.lua) self.timers.append(i) return i def delayed(seconds, function): i = Delayed(seconds, function, self.lua) self.timers.append(i) return i def simple_com(text, user, args): params = [] if args: for key in args: if key != "n": params.append(args[key]) try: response = text.format(*params, user=user) except IndexError: response = user + u", invalid number of arguments." return response injector("log", log) injector("datasource", self.datasource) injector("human_readable_time", human_readable_time) injector("settings", self.settings) injector("Chat", self.chat) injector("Http", Http()) injector("TupleData", TupleData) injector("Interval", interval) injector("Delayed", delayed) injector("SimpleCom", simple_com)
bot/commandmanager.py
14,566
An exception that happens when a user tries to execute a custom command during it's cooldown period. Manager for custom commands An exception that happens when a user tries to execute a custom command without the appropriate user level for it. A simple structure to allow Lua to store and read data from the database Call from Lua via the injected _G["datastore"] instance: .. code-block:: lua _G["datastore"].set("my-data", "my-value") _G["datastore"].get("my-data") If not working directly with the datasource implementation you should however use the datasource wrapper: .. code-block:: lua local ds = require('datasource') ds.set("my-data", "my-value") ds.get("my-data") Check if this command can be run with the given user level :param user_level: The calling user's level :param command: The command being called :return: True of False Inject some Python objects and functions into the Lua global scope _G :return: None Check if this command's cooldown period is in effect :param command: Which command :param timestamp: What is the timestamp it was issued on :return: Convert the given user level to a number :param name: Level name :return: A number between 0 and Infinity, higher number is higher user level :raise ValueError: In case of invalid user level Process the given arguments into a function definition :param args: List of the words after the "def" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args Process the given arguments into a simple function definition :param args: List of the words after the "com" command :return: Function name, if it wants the caller's user name, the required user level, and the function's Lua code :raise argparse.ArgumentError: There was something wrong with the args Save the last execution time of a command :param command: Which command :param timestamp: What is the timestamp it was issued on :return: Handler for the "def" -commands in chat :param args: All the words after the "def" -command :return: The created command, if any, and the minimum required user level Handler for the "com" -commands in chat :param args: All the words after the "com" -command :return: A bunch of stuff Get a single value from the database :param key: The name of the value :return: The stored value or "null" if not found Check if the given command is registered :param command: The name of the command :return: True or False Load a command in the runtime :param command: What is the command called :param flags: Command flags :param user_level: The minimum user level to run the command :param code: The Lua code for the custom command :param set: Should the command be set on the bot via set_command, set this to False when loading commands from e.g. 
the database :return: None Load Lua code in our runtime :param code: The Lua code :return: None Pass a message from Lua to the Python logger :param message: The message text :return: None Handles running of custom commands from chat :param nick: The calling user :param user_level: The calling user's level :param command: The command triggered :param args: The words on the line after the command :param timestamp: The unixtime for when the event happened :return: Any return value from the custom Lua command, to be sent back to the channel :raise CommandPermissionError: If user lacks permissions for command Set a single value to the database :param key: The name of the value :param value: The value to be stored :return: None Cancel all timers still running :return: Module for handling the custom Lua commands for the bot TODO: Check how to work around this silly shit Template for creating new Lua functions Function template for doing Lua function calls {{ and }} are escaped { and } for .format() Rebuild code Rebuild response
3,982
en
0.779221
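A standalone sketch of the user-level gate that _can_run_command implements in bot/commandmanager.py, assuming the same four-level ordering used by _level_name_to_number:

LEVELS = ["user", "reg", "mod", "owner"]

def can_run(caller_level: str, required_level: str) -> bool:
    # a caller may run a command if their level index is at least the required index
    return LEVELS.index(caller_level) >= LEVELS.index(required_level)

assert can_run("owner", "mod")
assert not can_run("user", "mod")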
from typing import List, Optional, Union
from dataclasses import dataclass, field


## - - - - - - -
## userinfo.json
## - - - - - - -

@dataclass
class UserAddress:
    formatted: str = ""


@dataclass
class UserInfoResponse:
    group_ids: List[str] = field(default_factory=list)
    sub: str = ""
    given_name: str = ""
    name: str = ""
    email: str = ""
    phone_number: Optional[str] = None
    address: Optional[UserAddress] = None
    picture: Optional[str] = None


## - - - - - - -
## sprint.json
## - - - - - - -

@dataclass
class Sprint:
    id: str
    name: str
    start: str
    end: str


@dataclass
class ShortUserInfo:
    id: str
    name: str
    picture: str


@dataclass
class Issue:
    id: str
    summary: str
    description_short: str
    index: int
    status_id: str
    story_points: int
    assigned: Optional[ShortUserInfo]
    modified_at: str


@dataclass
class SprintResponse:
    sprint: Sprint
    issues: List[Issue]


## - - - - - - -
## create-task.json
## - - - - - - -

@dataclass
class CreateTaskRequestBody:
    author: str
    assigned: str
    summary: str
    project: str
    sprint: str
    labels: List[str]
    issue_type: str
    components: List[str]
    description: str
    priority: str


@dataclass
class CreateTaskPerson:
    id: str
    email: str
    name: str
    picture: str
    is_active: bool


@dataclass
class CreateTaskProject:
    id: str
    name: str


@dataclass
class CreateTaskStatus:
    id: str
    name: str


@dataclass
class CreateTaskActivity:
    user_id: str
    action: str
    created_at: str
    details: Optional[Union[CreateTaskPerson, CreateTaskStatus]] = None


@dataclass
class CreateTaskResponse:
    author: CreateTaskPerson
    assigned: CreateTaskPerson
    summary: str
    project: CreateTaskProject
    sprint: str
    labels: List[str]
    issue_type: str
    components: List[str]
    description: str
    priority: str
    status: CreateTaskStatus
    activity: List[CreateTaskActivity]
    created_at: str
    modified_at: str


## - - - - - - -
## update-task.json
## - - - - - - -

@dataclass
class UpdateTaskRequestBody(CreateTaskRequestBody):
    pass
frameworks/schema_dataclasses.py
2,178
- - - - - - - userinfo.json - - - - - - - - - - - - - - sprint.json - - - - - - - - - - - - - - create-task.json - - - - - - - - - - - - - - update-task.json - - - - - - -
171
it
0.329307
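A small sketch constructing the sprint dataclasses above; all field values are hypothetical:

from frameworks.schema_dataclasses import Sprint, ShortUserInfo, Issue, SprintResponse

sprint = Sprint(id="s-1", name="Sprint 1", start="2022-01-01", end="2022-01-14")
issue = Issue(id="i-1", summary="Fix login", description_short="Login button broken",
              index=0, status_id="todo", story_points=3,
              assigned=ShortUserInfo(id="u-1", name="Ada", picture=""),
              modified_at="2022-01-02")
payload = SprintResponse(sprint=sprint, issues=[issue])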
# # Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending # import os import unittest from deephaven import kafka_consumer as ck from deephaven.stream.kafka.consumer import TableType, KeyValueSpec from tests.testbase import BaseTestCase from deephaven import dtypes class KafkaConsumerTestCase(BaseTestCase): def _assert_common_cols(self, cols): self.assertEqual("KafkaPartition", cols[0].name) self.assertEqual(dtypes.int32, cols[0].data_type) self.assertEqual("KafkaOffset", cols[1].name) self.assertEqual(dtypes.long, cols[1].data_type) self.assertEqual("KafkaTimestamp", cols[2].name) self.assertEqual(dtypes.DateTime, cols[2].data_type) def test_basic_constants(self): """ Check that the basic constants are imported and visible. """ self.assertIsNotNone(ck.SEEK_TO_BEGINNING) self.assertIsNotNone(ck.DONT_SEEK) self.assertIsNotNone(ck.SEEK_TO_END) self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_BEGINNING) self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_END) self.assertIsNotNone(ck.ALL_PARTITIONS_DONT_SEEK) def test_simple_spec(self): """ Check a simple Kafka subscription creates the right table. """ t = ck.consume( {'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.simple_spec('Price', dtypes.double)) cols = t.columns self.assertEqual(4, len(cols)) self._assert_common_cols(cols) self.assertEqual("Price", cols[3].name) self.assertEqual(dtypes.double, cols[3].data_type) def test_json_spec(self): """ Check a JSON Kafka subscription creates the right table. """ t = ck.consume( {'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.json_spec( [('Symbol', dtypes.string), ('Side', dtypes.string), ('Price', dtypes.double), ('Qty', dtypes.int_), ('Tstamp', dtypes.DateTime)], mapping={ 'jsymbol': 'Symbol', 'jside': 'Side', 'jprice': 'Price', 'jqty': 'Qty', 'jts': 'Tstamp' } ), table_type=TableType.append() ) cols = t.columns self.assertEqual(8, len(cols)) self._assert_common_cols(cols) self.assertEqual("Symbol", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Price", cols[5].name) self.assertEqual(dtypes.double, cols[5].data_type) self.assertEqual("Qty", cols[6].name) self.assertEqual(dtypes.int_, cols[6].data_type) self.assertEqual("Tstamp", cols[7].name) self.assertEqual(dtypes.DateTime, cols[7].data_type) def test_avro_spec(self): """ Check an Avro Kafka subscription creates the right table. 
""" schema = \ """ { "type" : "record", "namespace" : "io.deephaven.examples", "name" : "share_price", "fields" : [ { "name" : "Symbol", "type" : "string" }, { "name" : "Side", "type" : "string" }, { "name" : "Qty", "type" : "int" }, { "name" : "Price", "type" : "double" } ] } """ schema_str = '{ "schema" : "%s" }' % \ schema.replace('\n', ' ').replace('"', '\\"') sys_str = \ """ curl -X POST \ -H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' \ --data-binary '%s' \ http://redpanda:8081/subjects/share_price_record/versions """ % schema_str r = os.system(sys_str) self.assertEqual(0, r) with self.subTest(msg='straight schema, no mapping'): t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', schema_version='1'), table_type=TableType.append() ) cols = t.columns self.assertEqual(7, len(cols)) self._assert_common_cols(cols) self.assertEqual("Symbol", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Qty", cols[5].name) self.assertEqual(dtypes.int32, cols[5].data_type) self.assertEqual("Price", cols[6].name) self.assertEqual(dtypes.double, cols[6].data_type) with self.subTest(msg='mapping_only (filter out some schema fields)'): m = {'Symbol': 'Ticker', 'Price': 'Dollars'} t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True), table_type=TableType.append() ) cols = t.columns self.assertEqual(5, len(cols)) self._assert_common_cols(cols) self.assertEqual("Ticker", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Dollars", cols[4].name) self.assertEqual(dtypes.double, cols[4].data_type) with self.subTest(msg='mapping (rename some fields)'): m = {'Symbol': 'Ticker', 'Qty': 'Quantity'} t = ck.consume( { 'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081' }, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m), table_type=TableType.append() ) cols = t.columns self.assertEqual(7, len(cols)) self._assert_common_cols(cols) self.assertEqual("Ticker", cols[3].name) self.assertEqual(dtypes.string, cols[3].data_type) self.assertEqual("Side", cols[4].name) self.assertEqual(dtypes.string, cols[4].data_type) self.assertEqual("Quantity", cols[5].name) self.assertEqual(dtypes.int32, cols[5].data_type) self.assertEqual("Price", cols[6].name) self.assertEqual(dtypes.double, cols[6].data_type) @unittest.skip("https://github.com/deephaven/deephaven-core/pull/2277") def test_deprecated_table_types(self): """ Tests to make sure deprecated TableTypes are equivalent """ self.assertEqual(TableType.append(), TableType.Append) self.assertEqual(TableType.stream(), TableType.Stream) def test_table_types(self): """ Tests TableType construction """ _ = TableType.append() _ = TableType.stream() _ = TableType.ring(4096) if __name__ == "__main__": unittest.main()
py/server/tests/test_kafka_consumer.py
7,835
Check an Avro Kafka subscription creates the right table. Check that the basic constants are imported and visible. Tests to make sure deprecated TableTypes are equivalent Check a JSON Kafka subscription creates the right table. Check a simple Kafka subscription creates the right table. Tests TableType construction Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
380
en
0.645784
import re from base64 import b64decode from datetime import datetime from urllib.parse import urlparse import marshmallow as ma from app.objects.secondclass.c_link import Link from app.utility.base_object import BaseObject from app.utility.base_planning_svc import BasePlanningService class Agent(BaseObject): RESERVED = dict(server='#{server}', group='#{group}', agent_paw='#{paw}', location='#{location}', exe_name='#{exe_name}', payload=re.compile('#{payload:(.*?)}', flags=re.DOTALL)) class AgentSchema(ma.Schema): paw = ma.fields.String() group = ma.fields.String() architecture = ma.fields.String() platform = ma.fields.String() server = ma.fields.String() username = ma.fields.String() location = ma.fields.String() pid = ma.fields.Integer() ppid = ma.fields.Integer() trusted = ma.fields.Boolean() last_seen = ma.fields.DateTime(format='%Y-%m-%d %H:%M:%S') sleep_min = ma.fields.Integer() sleep_max = ma.fields.Integer() executors = ma.fields.List(ma.fields.String()) privilege = ma.fields.String() display_name = ma.fields.String() exe_name = ma.fields.String() host = ma.fields.String() watchdog = ma.fields.Integer() contact = ma.fields.String() links = ma.fields.List(ma.fields.String) @ma.pre_load def remove_nulls(self, in_data, **_): return {k: v for k, v in in_data.items() if v is not None} @property def unique(self): return self.hash(self.paw) @property def display(self): return dict(paw=self.paw, group=self.group, architecture=self.architecture, platform=self.platform, server=self.server, location=self.location, pid=self.pid, ppid=self.ppid, trusted=self.trusted, last_seen=self.last_seen.strftime('%Y-%m-%d %H:%M:%S'), sleep_min=self.sleep_min, sleep_max=self.sleep_max, executors=self.executors, privilege=self.privilege, display_name=self.display_name, exe_name=self.exe_name, host=self.host, watchdog=self.watchdog, contact=self.contact, links=[link.display for link in self.links]) @property def display_name(self): return '{}${}'.format(self.host, self.username) def __init__(self, sleep_min, sleep_max, watchdog, platform='unknown', server='unknown', host='unknown', username='unknown', architecture='unknown', group='red', location='unknown', pid=0, ppid=0, trusted=True, executors=(), privilege='User', exe_name='unknown', contact='unknown', paw=None): super().__init__() self.paw = paw if paw else self.generate_name(size=6) self.host = host self.username = username self.group = group self.architecture = architecture self.platform = platform url = urlparse(server) self.server = '%s://%s:%s' % (url.scheme, url.hostname, url.port) self.location = location self.pid = pid self.ppid = ppid self.trusted = trusted self.created = datetime.now() self.last_seen = self.created self.last_trusted_seen = self.created self.executors = executors self.privilege = privilege self.exe_name = exe_name self.sleep_min = int(sleep_min) self.sleep_max = int(sleep_max) self.watchdog = int(watchdog) self.contact = contact self.links = [] self.access = self.Access.BLUE if group == 'blue' else self.Access.RED @classmethod def from_dict(cls, dict_obj): """ Creates an Agent object from parameters stored in a dict. 
AgentSchema is used to validate inputs.""" return cls(**cls.AgentSchema().load(dict_obj, partial=['paw'])) def store(self, ram): existing = self.retrieve(ram['agents'], self.unique) if not existing: ram['agents'].append(self) return self.retrieve(ram['agents'], self.unique) return existing async def calculate_sleep(self): return self.jitter('%d/%d' % (self.sleep_min, self.sleep_max)) async def capabilities(self, ability_set): abilities = [] if self.executors: preferred = 'psh' if 'psh' in self.executors else self.executors[0] executors = self.executors for ai in set([pa.ability_id for pa in ability_set]): total_ability = [ab for ab in ability_set if (ab.ability_id == ai) and (ab.platform == self.platform) and (ab.executor in executors)] if len(total_ability) > 0: val = next((ta for ta in total_ability if ta.executor == preferred), total_ability[0]) if self.privileged_to_run(val): abilities.append(val) return abilities async def heartbeat_modification(self, **kwargs): now = datetime.now() self.last_seen = now if self.trusted: self.last_trusted_seen = now self.update('pid', kwargs.get('pid')) self.update('ppid', kwargs.get('ppid')) self.update('server', kwargs.get('server')) self.update('exe_name', kwargs.get('exe_name')) self.update('location', kwargs.get('location')) self.update('privilege', kwargs.get('privilege')) self.update('host', kwargs.get('host')) self.update('username', kwargs.get('username')) self.update('architecture', kwargs.get('architecture')) self.update('platform', kwargs.get('platform')) self.update('executors', kwargs.get('executors')) async def gui_modification(self, **kwargs): loaded = self.AgentSchema(only=('group', 'trusted', 'sleep_min', 'sleep_max', 'watchdog')).load(kwargs) for k, v in loaded.items(): self.update(k, v) async def kill(self): self.update('watchdog', 1) self.update('sleep_min', 60 * 2) self.update('sleep_max', 60 * 2) def replace(self, encoded_cmd, file_svc): decoded_cmd = b64decode(encoded_cmd).decode('utf-8', errors='ignore').replace('\n', '') decoded_cmd = decoded_cmd.replace(self.RESERVED['server'], self.server) decoded_cmd = decoded_cmd.replace(self.RESERVED['group'], self.group) decoded_cmd = decoded_cmd.replace(self.RESERVED['agent_paw'], self.paw) decoded_cmd = decoded_cmd.replace(self.RESERVED['location'], self.location) decoded_cmd = decoded_cmd.replace(self.RESERVED['exe_name'], self.exe_name) decoded_cmd = self._replace_payload_data(decoded_cmd, file_svc) return decoded_cmd def privileged_to_run(self, ability): if not ability.privilege or self.Privileges[self.privilege].value >= self.Privileges[ability.privilege].value: return True return False async def bootstrap(self, data_svc): abilities = [] for i in self.get_config(name='agents', prop='bootstrap_abilities'): for a in await data_svc.locate('abilities', match=dict(ability_id=i)): abilities.append(a) await self.task(abilities) async def task(self, abilities, facts=()): for i in await self.capabilities(abilities): self.links.append(Link(operation=None, command=i.test, paw=self.paw, ability=i)) return await BasePlanningService().add_test_variants(links=self.links, agent=self, facts=facts) """ PRIVATE """ def _replace_payload_data(self, decoded_cmd, file_svc): for uuid in re.findall(self.RESERVED['payload'], decoded_cmd): if self.is_uuid4(uuid): _, display_name = file_svc.get_payload_name_from_uuid(uuid) decoded_cmd = decoded_cmd.replace('#{payload:%s}' % uuid, display_name) return decoded_cmd
app/objects/c_agent.py
7,948
Creates an Agent object from parameters stored in a dict. AgentSchema is used to validate inputs.
97
en
0.640321
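A standalone sketch of the reserved-placeholder substitution that Agent.replace performs in app/objects/c_agent.py, using plain string replacement and hypothetical values (the payload-UUID case is omitted):

reserved = {'#{server}': 'http://10.0.0.1:8888', '#{group}': 'red',
            '#{paw}': 'abc123', '#{location}': '/tmp/agent', '#{exe_name}': 'sandcat.go'}

cmd = './#{exe_name} -server #{server} -group #{group} -paw #{paw}'
for token, value in reserved.items():
    cmd = cmd.replace(token, value)

print(cmd)  # ./sandcat.go -server http://10.0.0.1:8888 -group red -paw abc123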
#!/usr/bin/env python

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from codecs import open
from setuptools import setup, find_packages

try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")

VERSION = '0.1.0'

# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'License :: OSI Approved :: MIT License',
]

DEPENDENCIES = []

with open('README.md', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()

setup(
    name='connection-monitor-preview',
    version=VERSION,
    description='Microsoft Azure Command-Line Connection Monitor V2 Extension',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli-extensions/tree/master/src/connection-monitor-preview',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=DEPENDENCIES,
    package_data={'azext_connection_monitor_preview': ['azext_metadata.json']},
)
src/connection-monitor-preview/setup.py
1,976
!/usr/bin/env python -------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- The full list of classifiers is available at https://pypi.python.org/pypi?%3Aaction=list_classifiers
458
en
0.432038
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020      Henny Sipma
# Copyright (c) 2021-2022 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Utilities to print and save graphviz dot files."""

import os
import subprocess

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from chb.util.DotGraph import DotGraph


def print_dot(path: str, filename: str, g: "DotGraph") -> str:
    if not os.path.isabs(filename):
        filename = os.path.join(path, filename)
    dotfilename = filename + ".dot"
    pdffilename = filename + ".pdf"

    # write graph to dot format
    with open(dotfilename, "w") as fp:
        fp.write(str(g))

    # convert dot file to pdf
    cmd = ["dot", "-Tpdf", "-o", pdffilename, dotfilename]
    try:
        subprocess.call(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("Error in processing dot file: " + dotfilename)
        print(e.output)
        print(e.args)
        exit(1)
    return pdffilename


def save_dot(path: str, filename: str, g: "DotGraph") -> None:
    if not os.path.isabs(filename):
        filename = os.path.join(path, filename)
    dotfilename = filename + ".dot"
    with open(dotfilename, "w") as fp:
        fp.write(str(g))


def save_svg(path: str, filename: str, g: "DotGraph") -> None:
    if not os.path.isabs(filename):
        filename = os.path.join(path, filename)
    dotfilename = filename + ".dot"
    svgfilename = filename + ".svg"
    with open(dotfilename, "w") as fp:
        fp.write(str(g))
    cmd = ["dot", "-Tsvg", "-o", svgfilename, dotfilename]
    try:
        subprocess.call(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("Error in processing dot file: " + dotfilename)
        print(e.output)
        print(e.args)
        exit(1)
chb/util/dotutil.py
3,201
Utilities to print and save graphviz dot files. ------------------------------------------------------------------------------ CodeHawk Binary Analyzer Author: Henny Sipma ------------------------------------------------------------------------------ The MIT License (MIT) Copyright (c) 2016-2020 Kestrel Technology LLC Copyright (c) 2020 Henny Sipma Copyright (c) 2021-2022 Aarno Labs LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ write graph to dot format convert dot file to pdf
1,546
en
0.757498
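A minimal sketch of using save_dot from chb/util/dotutil.py; the graph object is hypothetical, since the helpers only need str(g) to produce DOT source:

from chb.util.dotutil import save_dot


class TinyGraph:
    def __str__(self) -> str:
        return "digraph G { a -> b; }"


save_dot("/tmp", "tiny", TinyGraph())  # writes /tmp/tiny.dot
# print_dot("/tmp", "tiny", TinyGraph()) would additionally run `dot -Tpdf`,
# which assumes graphviz is installed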
# qubit number=3
# total number=9
import numpy as np

from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown

kernel = 'circuit/bernstein'


def make_circuit(n: int) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0])  # number=1
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4

    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k - 1], input_qubit[l - 1])
        prog.p(gamma, k)
        prog.p(gamma, l)

    prog.rx(2 * beta, range(len(V)))

    prog.swap(input_qubit[1], input_qubit[0])  # number=5
    prog.swap(input_qubit[1], input_qubit[0])  # number=6
    prog.y(input_qubit[3])  # number=7
    prog.y(input_qubit[3])  # number=8
    # circuit end
    return prog


if __name__ == '__main__':
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)

    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    prog = make_circuit(4)
    sample_shot = 5200
    writefile = open("../data/startQiskit_Class82.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('statevector_simulator')

    circuit1 = transpile(prog, FakeYorktown())
    prog = circuit1

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()

    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
data/p4VQE/R1/benchmark/startQiskit_Class82.py
2,395
qubit number=3 total number=9 circuit begin number=1 number=2 number=3 number=4 number=5 number=6 number=7 number=8 circuit end prog.draw('mpl', filename=(kernel + '.png'))
172
en
0.175413
# flake8: noqa

"""
    OpenAPI definition

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: v0
    Generated by: https://openapi-generator.tech
"""

__version__ = "0.6.0"

# import ApiClient
from gooddata_afm_client.api_client import ApiClient

# import Configuration
from gooddata_afm_client.configuration import Configuration

# import exceptions
from gooddata_afm_client.exceptions import OpenApiException
from gooddata_afm_client.exceptions import ApiAttributeError
from gooddata_afm_client.exceptions import ApiTypeError
from gooddata_afm_client.exceptions import ApiValueError
from gooddata_afm_client.exceptions import ApiKeyError
from gooddata_afm_client.exceptions import ApiException
gooddata-afm-client/gooddata_afm_client/__init__.py
814
OpenAPI definition No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v0 Generated by: https://openapi-generator.tech flake8: noqa import ApiClient import Configuration import exceptions
297
en
0.568822
from __future__ import absolute_import from __future__ import division from __future__ import print_function from matplotlib import pyplot as plt import tensorflow as tf import seaborn as sb import pandas as pd import numpy as np import math import time import cv2 import os tf.reset_default_graph() gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) # tip: if you run into problems with TensorBoard # clear the contents of this directory, re-run this script # then restart TensorBoard to see the result # LOGDIR = './graphs' model_frames = 64 NUM_CLASSES = 74 NUM_PIXELS = 88 * 128 TRAIN_STEPS = 0 BATCH_SIZE = 1 << 5 MODEL_ANGLE_DICT = {'000': True, '018': False, '036': False, '054': False, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False} TEST_ANGLE_DICT = {'000': False, '018': False, '036': False, '054': True, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False} LEARNING_RATE = 1e-4 DATA_PATH = 'Generated_full_data_GEI' start_time = time.time() keep_prob = 0.5 #dropout (keep probability) def del_files(path): for root, dirs, files in os.walk(path): for name in files: if name.startswith("."): os.remove(os.path.join(root, name)) print("Delete File: " + os.path.join(root, name)) def get_label(_index, num_classes): # label = np.zeros(shape=[num_classes], dtype='float32') # label[int(_index) - 1] = 1 # return label return (int(_index) - 1) def load_images_from_folder(folder, model_angle_dict, test_angle_dict): train_frames = [] train_labels = [] probe_frames = [] probe_labels = [] for i in xrange(11): train_frames.append([]) for i in xrange(11): train_labels.append([]) for i in xrange(11): probe_frames.append([]) for i in xrange(11): probe_labels.append([]) for human_id in os.listdir(os.path.join(folder, 'train')): if int(human_id) < 74: continue for angle in os.listdir(os.path.join(folder, 'train', human_id)): # if not model_angle_dict[angle]: # continue for _type in os.listdir(os.path.join(folder, 'train', human_id, angle)): img = cv2.imread(os.path.join(folder, 'train', human_id, angle, _type), 0) if img is not None: train_frames[int(angle) // 18].append(img.flatten()) train_labels[int(angle) // 18].append(get_label(human_id, 124)) for human_id in os.listdir(os.path.join(folder, 'test')): for angle in os.listdir(os.path.join(folder, 'test', human_id)): # if not test_angle_dict[angle]: # continue for _type in os.listdir(os.path.join(folder, 'test', human_id, angle)): img = cv2.imread(os.path.join(folder, 'test', human_id, angle, _type), 0) if img is not None: probe_frames[int(angle) // 18].append(img.flatten()) probe_labels[int(angle) // 18].append(get_label(human_id, 124)) return (train_frames, train_labels, probe_frames, probe_labels) del_files(DATA_PATH) (train_frames, train_labels, probe_frames, probe_labels) = load_images_from_folder(DATA_PATH, MODEL_ANGLE_DICT, TEST_ANGLE_DICT) # Define inputs with tf.name_scope('input'): images = tf.placeholder(tf.float32, [None, NUM_PIXELS], name="pixels") labels = tf.placeholder(tf.float32, [None, NUM_CLASSES], name="labels") # dropout_prob = tf.placeholder_with_default(1.0, shape=()) # Create some wrappers for simplicity def conv2d(x, W, b, strides=1): # Conv2D wrapper, with bias and relu activation x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) def maxpool2d(x, k=2): # MaxPool2D wrapper return tf.nn.max_pool(x, 
ksize=[1, k, k, 1], strides=[1, 2, 2, 1], padding='SAME') # Create model def conv_net(x, weights, biases, dropout): # Reshape input picture x = tf.reshape(x, shape=[-1, 128, 88, 1]) # Convolution Layer conv1 = conv2d(x, weights['wc1'], biases['bc1']) # Max Pooling (down-sampling) conv1 = maxpool2d(conv1, k=2) conv1 = tf.contrib.layers.batch_norm(conv1) # Convolution Layer conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) # Max Pooling (down-sampling) conv2 = maxpool2d(conv2, k=3) conv2 = tf.contrib.layers.batch_norm(conv2) # Fully connected layer # Reshape conv2 output to fit fully connected layer input fc3 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) fc3 = tf.add(tf.matmul(fc3, weights['wd1']), biases['bd1']) fc3 = tf.nn.relu(fc3) # Apply Dropout # fc1 = tf.nn.dropout(fc1, dropout) # fc3 = tf.nn.dropout(fc3, dropout_prob) # # Output, class prediction fc4 = tf.add(tf.matmul(fc3, weights['fc4']), biases['fc4']) return fc3 # Store layers weight & bias initializer = tf.contrib.layers.xavier_initializer() weights = { # 7x7 conv, 1 input, 18 outputs 'wc1': tf.Variable(initializer([7, 7, 1, 18])), # 5x5 conv, 18 inputs, 45 outputs 'wc2': tf.Variable(initializer([5, 5, 18, 45])), # fully connected, 7*7*64 inputs, 1024 outputs 'wd1': tf.Variable(initializer([32*22*45, 1024])), # # 1024 inputs, 10 outputs (class prediction) 'fc4': tf.Variable(initializer([1024, NUM_CLASSES])) } biases = { 'bc1': tf.Variable(tf.random_normal([18])), 'bc2': tf.Variable(tf.random_normal([45])), 'bd1': tf.Variable(tf.random_normal([1024])), 'fc4': tf.Variable(tf.random_normal([NUM_CLASSES])) } y = conv_net(images, weights, biases, keep_prob) sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, "./full_tri_model/model.ckpt") print("%d frames model restored."%model_frames) print(' ', end=',') for i in xrange(11): print('%4d'%(i * 18), end=',') print_map = np.zeros(shape=(11, 11), dtype=np.float32) gallery_encoding = [] probe_encoding = [] for a in range(11): gallery_encoding.append(sess.run(y, feed_dict={images: train_frames[a]})) for a in range(11): probe_encoding.append(sess.run(y, feed_dict={images: probe_frames[a]})) for a in range(11): print('') print('%3d'%(a * 18), end=',') for b in range(11): simlarity = np.zeros(shape=[len(probe_encoding[b]), len(gallery_encoding[a])], dtype=np.float32) pred_label = np.zeros(shape=[len(probe_encoding[b])], dtype=np.int) for i in range(len(probe_encoding[b])): for j in range(len(gallery_encoding[a])): simlarity[i][j] = np.exp(-(((probe_encoding[b][i] - gallery_encoding[a][j])/1024.0)**2).sum()) # import pdb # pdb.set_trace() tmp_index = simlarity[i].argmax() pred_label[i] = train_labels[a][tmp_index] # if not (pred_label[i] == probe_labels[i]): # print(str((pred_label[i] == probe_labels[i])) + ' ' + str(pred_label[i]) + ' ' + str(probe_labels[i])) acc = np.sum(pred_label[:] == probe_labels[b][:]) # print_map[b][10 - a] = 100.0 * acc/(len(probe_labels[b])*1.0) print_map[b][a] = 100.0 * acc/(len(probe_labels[b])*1.0) print('%.2f'%(100.0 * acc/(len(probe_labels[b])*1.0)), end=',') print(print_map) grid_visualization = np.array(print_map.transpose()) grid_visualization.shape = (11, 11) sb.heatmap(grid_visualization, cmap='Oranges') plt.xticks(np.arange(11) + 0.5, xrange(0, 181, 18)) plt.yticks(np.arange(11) + 0.5, xrange(180, -1, -18)) plt.xlabel('Gallery Angle') plt.ylabel('Probe Angle') plt.show()
classification/casiab_performance.py
7,911
tip: if you run into problems with TensorBoard clear the contents of this directory, re-run this script then restart TensorBoard to see the result LOGDIR = './graphs' dropout (keep probability) label = np.zeros(shape=[num_classes], dtype='float32') label[int(_index) - 1] = 1 return label if not model_angle_dict[angle]: continue if not test_angle_dict[angle]: continue Define inputs dropout_prob = tf.placeholder_with_default(1.0, shape=()) Create some wrappers for simplicity Conv2D wrapper, with bias and relu activation MaxPool2D wrapper Create model Reshape input picture Convolution Layer Max Pooling (down-sampling) Convolution Layer Max Pooling (down-sampling) Fully connected layer Reshape conv2 output to fit fully connected layer input Apply Dropout fc1 = tf.nn.dropout(fc1, dropout) fc3 = tf.nn.dropout(fc3, dropout_prob) Output, class prediction Store layers weight & bias 7x7 conv, 1 input, 18 outputs 5x5 conv, 18 inputs, 45 outputs fully connected, 7*7*64 inputs, 1024 outputs 1024 inputs, 10 outputs (class prediction) import pdb pdb.set_trace() if not (pred_label[i] == probe_labels[i]): print(str((pred_label[i] == probe_labels[i])) + ' ' + str(pred_label[i]) + ' ' + str(probe_labels[i])) print_map[b][10 - a] = 100.0 * acc/(len(probe_labels[b])*1.0)
1,284
en
0.508491
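A standalone sketch of the nearest-gallery matching used in classification/casiab_performance.py, assuming 1024-dimensional encodings and the same similarity exp(-||(p - g)/1024||^2):

import numpy as np

def match(probe_vec: np.ndarray, gallery: np.ndarray, gallery_labels: np.ndarray) -> int:
    # gallery: (N, 1024) encodings, gallery_labels: (N,) integer identities
    sim = np.exp(-(((probe_vec - gallery) / 1024.0) ** 2).sum(axis=1))
    return int(gallery_labels[sim.argmax()])

gallery = np.random.rand(5, 1024)
labels = np.arange(5)
print(match(gallery[3] + 0.01, gallery, labels))  # expected: 3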
""" Parsing time durations from strings This module provides a function that parses time durations from strings. It has been copied from the django software, which comes with the following notes: Copyright (c) Django Software Foundation and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Django nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import datetime import re standard_duration_re = re.compile( r"^" r"(?:(?P<days>-?\d+) (days?, )?)?" r"((?:(?P<hours>-?\d+):)(?=\d+:\d+))?" r"(?:(?P<minutes>-?\d+):)?" r"(?P<seconds>-?\d+)" r"(?:\.(?P<microseconds>\d{1,6})\d{0,6})?" r"$" ) # Support the sections of ISO 8601 date representation that are accepted by # timedelta iso8601_duration_re = re.compile( r"^(?P<sign>[-+]?)" r"P" r"(?:(?P<days>\d+(.\d+)?)D)?" r"(?:T" r"(?:(?P<hours>\d+(.\d+)?)H)?" r"(?:(?P<minutes>\d+(.\d+)?)M)?" r"(?:(?P<seconds>\d+(.\d+)?)S)?" r")?" r"$" ) # Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The # year-month and mixed intervals cannot be converted to a timedelta and thus # aren't accepted. postgres_interval_re = re.compile( r"^" r"(?:(?P<days>-?\d+) (days? ?))?" r"(?:(?P<sign>[-+])?" r"(?P<hours>\d+):" r"(?P<minutes>\d\d):" r"(?P<seconds>\d\d)" r"(?:\.(?P<microseconds>\d{1,6}))?" r")?$" ) def parse_duration(value: str) -> datetime.timedelta: """Parse a duration string and return a datetime.timedelta. Args: value (str): A time duration given as text. The preferred format for durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601 representation and PostgreSQL's day-time interval format. Returns: datetime.timedelta: An instance representing the duration. 
""" match = ( standard_duration_re.match(value) or iso8601_duration_re.match(value) or postgres_interval_re.match(value) ) if match: kw = match.groupdict() days = datetime.timedelta(float(kw.pop("days", 0) or 0)) sign = -1 if kw.pop("sign", "+") == "-" else 1 if kw.get("microseconds"): kw["microseconds"] = kw["microseconds"].ljust(6, "0") if ( kw.get("seconds") and kw.get("microseconds") and kw["seconds"].startswith("-") ): kw["microseconds"] = "-" + kw["microseconds"] kw = {k: float(v) for k, v in kw.items() if v is not None} return days + sign * datetime.timedelta(**kw) # type: ignore else: raise ValueError(f"The time duration {value} cannot be parsed.") __all__ = ["parse_duration"]
pde/tools/parse_duration.py
4,077
Parse a duration string and return a datetime.timedelta. Args: value (str): A time duration given as text. The preferred format for durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601 representation and PostgreSQL's day-time interval format. Returns: datetime.timedelta: An instance representing the duration. Parsing time durations from strings This module provides a function that parses time durations from strings. It has been copied from the django software, which comes with the following notes: Copyright (c) Django Software Foundation and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Django nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Support the sections of ISO 8601 date representation that are accepted by timedelta Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The year-month and mixed intervals cannot be converted to a timedelta and thus aren't accepted. type: ignore
2,363
en
0.861665
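Usage examples for parse_duration, exercising the three accepted formats (the input values are chosen for illustration):

import datetime
from pde.tools.parse_duration import parse_duration

assert parse_duration("3 10:11:12") == datetime.timedelta(days=3, hours=10, minutes=11, seconds=12)
assert parse_duration("PT1H30M") == datetime.timedelta(hours=1, minutes=30)  # ISO 8601
assert parse_duration("3 days 04:05:06") == datetime.timedelta(days=3, hours=4, minutes=5, seconds=6)  # PostgreSQL style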
class solution:
    def twoSum(self, nums, target):
        hash_map = dict()  # initializing with the {} literal performs better than dict() in Python
        for i, x in enumerate(nums):
            if target - x in hash_map:
                return [i, hash_map[target - x]]
            hash_map[x] = i
LeetCode/Leetcode-2019Summer/Leetcode001(a+b).py
281
Python字典初始化{}性能比dict()好
23
zh
0.187498
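A quick check of the two-sum solution above with a hypothetical input:

s = solution()
print(s.twoSum([2, 7, 11, 15], 9))  # -> [1, 0], since nums[1] + nums[0] == 9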
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2017 Lionheart Software LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import runpy

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

metadata_filename = "bigstore/metadata.py"
metadata = runpy.run_path(metadata_filename)

# http://pypi.python.org/pypi?:action=list_classifiers
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Natural Language :: English",
    "Operating System :: Unix",
    "Operating System :: MacOS :: MacOS X",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Topic :: Software Development :: Libraries",
    "Topic :: Software Development :: Version Control",
    "Topic :: Utilities",
]

setup(
    name='git-bigstore',
    description="Track big files with Git.",
    version=metadata['__version__'],
    license=metadata['__license__'],
    classifiers=classifiers,
    author=metadata['__author__'],
    author_email=metadata['__email__'],
    url="https://github.com/lionheart/git-bigstore",
    packages=[
        'bigstore.backends',
        'bigstore',
    ],
    scripts=[
        'bin/git-bigstore',
    ],
    install_requires=[
        'future',
        'gitpython<3',
        'boto',
        'boto3',
        'python-dateutil',
        'pytz',
        'python-cloudfiles;python_version<="2.7"',
    ],
)
setup.py
2,138
!/usr/bin/env python -*- coding: utf-8 -*- Copyright 2015-2017 Lionheart Software LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. http://pypi.python.org/pypi?:action=list_classifiers
660
en
0.797893
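The setup.py above reads version and license information by executing bigstore/metadata.py with runpy.run_path instead of importing the package. A self-contained illustration of that pattern (the temporary file and its contents are made up for the demo):

import os
import runpy
import tempfile

# Write a throwaway metadata module, then execute it and read its globals,
# exactly as the setup.py above does with bigstore/metadata.py.
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write('__version__ = "0.1.0"\n__license__ = "Apache 2.0"\n')
    path = f.name

metadata = runpy.run_path(path)
print(metadata["__version__"], metadata["__license__"])
os.unlink(path)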
# -*- coding: utf-8 -*-
"""
    test_searchadapters
    ~~~~~~~~~~~~~~~~~~~

    Test the Web Support Package search adapters.

    :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from six import StringIO

from sphinx.websupport import WebSupport

from test_websupport import sqlalchemy_missing
from util import rootdir, tempdir, skip_if, skip_unless_importable


def teardown_module():
    (tempdir / 'websupport').rmtree(True)


def search_adapter_helper(adapter):
    settings = {'srcdir': rootdir / 'roots' / 'test-searchadapters',
                'builddir': tempdir / 'websupport',
                'status': StringIO(),
                'warning': StringIO(),
                'search': adapter}
    support = WebSupport(**settings)
    support.build()

    s = support.search

    # Test the adapters query method. A search for "Epigraph" should return
    # one result.
    results = s.query(u'Epigraph')
    assert len(results) == 1, \
        '%s search adapter returned %s search result(s), should have been 1'\
        % (adapter, len(results))
    # Make sure documents are properly updated by the search adapter.
    s.init_indexing(changed=['markup'])
    s.add_document(u'markup', u'filename', u'title', u'SomeLongRandomWord')
    s.finish_indexing()
    # Now a search for "Epigraph" should return zero results.
    results = s.query(u'Epigraph')
    assert len(results) == 0, \
        '%s search adapter returned %s search result(s), should have been 0'\
        % (adapter, len(results))
    # A search for "SomeLongRandomWord" should return one result.
    results = s.query(u'SomeLongRandomWord')
    assert len(results) == 1, \
        '%s search adapter returned %s search result(s), should have been 1'\
        % (adapter, len(results))
    # Make sure it works through the WebSupport API
    support.get_search_results(u'SomeLongRandomWord')


@skip_unless_importable('xapian', 'needs xapian bindings installed')
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
def test_xapian():
    search_adapter_helper('xapian')


@skip_unless_importable('whoosh', 'needs whoosh package installed')
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
def test_whoosh():
    search_adapter_helper('whoosh')
tests/test_searchadapters.py
2,283
test_searchadapters ~~~~~~~~~~~~~~~~~~~ Test the Web Support Package search adapters. :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. -*- coding: utf-8 -*- Test the adapters query method. A search for "Epigraph" should return one result. Make sure documents are properly updated by the search adapter. Now a search for "Epigraph" should return zero results. A search for "SomeLongRandomWord" should return one result. Make sure it works through the WebSupport API
524
en
0.778586
from __future__ import division, print_function, absolute_import

from subprocess import Popen, PIPE, STDOUT

import numpy as np

SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]


def gen_data(dt):
    arrays = {}

    if dt == np.float128:
        pg = './fftw_longdouble'
    elif dt == np.double:
        pg = './fftw_double'
    elif dt == np.float32:
        pg = './fftw_single'
    else:
        raise ValueError("unknown: %s" % dt)

    # Generate test data using FFTW for reference
    for type in [1, 2, 3, 4, 5, 6, 7, 8]:
        arrays[type] = {}
        for sz in SZ:
            a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)
            st = [i.decode('ascii').strip() for i in a.stdout.readlines()]
            arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt)

    return arrays


# generate single precision data
data = gen_data(np.float32)
filename = 'fftw_single_ref'

# Save ref data into npz format
d = {'sizes': SZ}
for type in [1, 2, 3, 4]:
    for sz in SZ:
        d['dct_%d_%d' % (type, sz)] = data[type][sz]

d['sizes'] = SZ
for type in [5, 6, 7, 8]:
    for sz in SZ:
        d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)


# generate double precision data
data = gen_data(np.float64)
filename = 'fftw_double_ref'

# Save ref data into npz format
d = {'sizes': SZ}
for type in [1, 2, 3, 4]:
    for sz in SZ:
        d['dct_%d_%d' % (type, sz)] = data[type][sz]

d['sizes'] = SZ
for type in [5, 6, 7, 8]:
    for sz in SZ:
        d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)


# generate long double precision data
data = gen_data(np.float128)
filename = 'fftw_longdouble_ref'

# Save ref data into npz format
d = {'sizes': SZ}
for type in [1, 2, 3, 4]:
    for sz in SZ:
        d['dct_%d_%d' % (type, sz)] = data[type][sz]

d['sizes'] = SZ
for type in [5, 6, 7, 8]:
    for sz in SZ:
        d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)
environment/lib/python3.8/site-packages/scipy/fftpack/tests/gen_fftw_ref.py
1,989
Generate test data using FFTW for reference generate single precision data Save ref data into npz format generate double precision data Save ref data into npz format generate long double precision data Save ref data into npz format
231
en
0.285886
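The script above stores its FFTW reference output in .npz archives keyed as 'dct_<type>_<size>' and 'dst_<type>_<size>' plus a 'sizes' array. A minimal round-trip showing that layout with dummy values (not real FFTW data):

import numpy as np

# Build a tiny archive with the same key scheme as fftw_*_ref.npz, then read it back.
dummy = {
    'sizes': np.array([8, 16]),
    'dct_2_8': np.arange(8, dtype=np.float64),   # stand-in for a type-II DCT reference of length 8
    'dst_1_8': np.arange(8, dtype=np.float64),   # stand-in for a type-I DST reference of length 8
}
np.savez('fftw_demo_ref', **dummy)

ref = np.load('fftw_demo_ref.npz')
print(sorted(ref.keys()))
print(ref['dct_2_8'])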
# coding: utf-8 import numpy as np import csv import codecs import os import glob from collections import defaultdict SPACE = " " EMPTY = " " INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '!':4, '-':5, ';':6, ':':7, '...':8, '':0} PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?', 4:'!', 5:'-', 6:';', 7:':', 8:'...'} REDUCED_PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?'} REDUCED_INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '':0} EOS_PUNCTUATION_CODES = [2,3,4,5,6,7,8] END = "<END>" UNK = "<UNK>" EMP = "<EMP>" NA = "NA" #PAUSE_FEATURE_NAME = 'pause_before' #ALL_POSSIBLE_INPUT_FEATURES = {'word', 'pos', 'pause_before', 'speech_rate_norm', 'f0_mean', 'f0_range', 'i0_mean', 'i0_range'} def pad(l, size, padding): if size >= len(l): return l + [padding] * abs((len(l)-size)) else: return l[0:size] def read_proscript(filename, add_end=False): columns = defaultdict(list) # each value in each column is appended to a list skip_columns = [] with open(filename) as f: reader = csv.DictReader(f, delimiter='|') # read rows into a dictionary format for row in reader: # read a row as {column1: value1, column2: value2,...} for (k,v) in row.items(): # go over each column name and value if not k in skip_columns: if "word" in k or "punctuation" in k or "pos" in k: columns[k].append(v) # append the value into the appropriate list else: try: columns[k].append(float(v)) # real value except ValueError: skip_columns.append(k) if add_end and not columns['word'][-1] == END: for k in columns.keys(): if "word" in k or "pos" in k: columns[k].append(END) elif "punctuation" in k: columns[k].append("") else: columns[k].append(0.0) return columns def checkArgument(argname, isFile=False, isDir=False, createDir=False): if not argname: return False else: if isFile and not os.path.isfile(argname): return False if isDir: if not os.path.isdir(argname): if createDir: print("Creating directory %s"%(argname)) os.makedirs(argname) else: return False return True def iterable_to_dict(arr): return dict((x.strip(), i) for (i, x) in enumerate(arr)) def read_vocabulary(file_name): with codecs.open(file_name, 'r', 'utf-8') as f: return iterable_to_dict(f.readlines()) def to_array(arr, dtype=np.int32): # minibatch of 1 sequence as column return np.array([arr], dtype=dtype).T def create_pause_bins(): bins = np.arange(0, 1, 0.05) bins = np.concatenate((bins, np.arange(1, 2, 0.1))) bins = np.concatenate((bins, np.arange(2, 5, 0.2))) bins = np.concatenate((bins, np.arange(5, 10, 0.5))) bins = np.concatenate((bins, np.arange(10, 20, 1))) return bins def create_pause_bins9(): bins = np.array([ 0. , 0.25, 0.5 , 0.75, 1. , 2. , 3. , 4. , 5. 
]) return bins def create_pause_bins2(): return [0.0, 1.14] def create_pause_bins3(): return [0.0, 0.2, 1.0] def create_semitone_bins(): bins = np.arange(-20, -10, 1) bins = np.concatenate((bins, np.arange(-10, -5, 0.5))) bins = np.concatenate((bins, np.arange(-5, 0, 0.25))) bins = np.concatenate((bins, np.arange(0, 5, 0.25))) bins = np.concatenate((bins, np.arange(5, 10, 0.5))) bins = np.concatenate((bins, np.arange(10, 20, 1))) return bins def levels_from_file(filename): with open(filename) as f: lst = [float(line.rstrip()) for line in f] return lst def get_level_maker(levels_file): levels_list = levels_from_file(levels_file) def get_level(value): level = 0 for level_bin in levels_list: if value > level_bin: level +=1 else: return level return level no_of_levels = len(levels_list) + 1 return get_level, no_of_levels #OBSOLETE def convert_value_to_level_sequence(value_sequence, bins): levels = [] for value in value_sequence: level = 0 for bin_no, bin_upper_limit in enumerate(bins): if value > bin_upper_limit: level += 1 else: break levels.append(level) return levels def reducePuncCode(puncCode): if puncCode in [4, 5, 6, 7, 8]: #period return 2 else: return puncCode def reducePunc(punc): if punc and not punc.isspace(): puncCode = INV_PUNCTUATION_CODES[punc] reducedPuncCode = reducePuncCode(puncCode) return PUNCTUATION_VOCABULARY[reducedPuncCode] else: return punc
utilities.py
4,320
coding: utf-8PAUSE_FEATURE_NAME = 'pause_before'ALL_POSSIBLE_INPUT_FEATURES = {'word', 'pos', 'pause_before', 'speech_rate_norm', 'f0_mean', 'f0_range', 'i0_mean', 'i0_range'} each value in each column is appended to a list read rows into a dictionary format read a row as {column1: value1, column2: value2,...} go over each column name and value append the value into the appropriate list real value minibatch of 1 sequence as columnOBSOLETEperiod
449
en
0.674114
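utilities.py above quantizes continuous features (pause durations, semitone values) into discrete levels via get_level_maker. A standalone sketch of that bucketing, using the edge values from create_pause_bins3 in the record; the names here are illustrative, not part of the original module:

# A value is mapped to the index of the last bin edge it exceeds,
# mirroring get_level(value) in the record above.
def make_level_getter(bin_edges):
    def get_level(value):
        level = 0
        for edge in bin_edges:
            if value > edge:
                level += 1
            else:
                return level
        return level
    return get_level, len(bin_edges) + 1

get_level, n_levels = make_level_getter([0.0, 0.2, 1.0])   # edges from create_pause_bins3()
print(n_levels)                                            # 4 possible levels: 0..3
print([get_level(v) for v in (-0.1, 0.05, 0.5, 2.0)])      # [0, 1, 2, 3]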
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php


def asbool(obj):
    if isinstance(obj, str):
        obj = obj.strip().lower()
        if obj in ['true', 'yes', 'on', 'y', 't', '1']:
            return True
        elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
            return False
        else:
            raise ValueError(
                "String is not true/false: %r" % obj)
    return bool(obj)


def aslist(obj, sep=None, strip=True):
    if isinstance(obj, str):
        lst = obj.split(sep)
        if strip:
            lst = [v.strip() for v in lst]
        return lst
    elif isinstance(obj, (list, tuple)):
        return obj
    elif obj is None:
        return []
    else:
        return [obj]
ita/web/beaker/converters.py
844
(c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
164
en
0.590981
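A quick usage note for the two helpers above (assuming they are importable, e.g. from beaker.converters if Beaker is installed): asbool normalizes common truthy/falsy strings and otherwise falls back to bool(), while aslist splits a delimited string into a list and passes sequences through unchanged.

from beaker.converters import asbool, aslist   # assumed install; the record above is the same module

print(asbool("Yes"), asbool("off"), asbool(None))   # True False False
print(aslist("a, b , c", sep=","))                  # ['a', 'b', 'c']
print(aslist(None), aslist(("x", "y")))             # [] ('x', 'y')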
# Copyright (c) 2016 Universidade Federal Fluminense (UFF) # Copyright (c) 2016 Polytechnic Institute of New York University. # This file is part of noWorkflow. # Please, consult the license terms in the LICENSE file. """Trial Model""" from __future__ import (absolute_import, print_function, division, unicode_literals) import os from sqlalchemy import Column, Integer, Text, TIMESTAMP from sqlalchemy import ForeignKeyConstraint, select, func, distinct from ...utils.formatter import PrettyLines from ...utils.prolog import PrologDescription, PrologTrial, PrologNullableRepr from ...utils.prolog import PrologTimestamp, PrologAttribute, PrologRepr from ...utils.prolog import PrologNullable from .. import relational, content, persistence_config from .base import AlchemyProxy, proxy_class, query_many_property, proxy_gen from .base import one, many_ref, many_viewonly_ref, backref_many, is_none from .base import proxy from .trial_prolog import TrialProlog from .trial_dot import TrialDot from .module import Module from .dependency import Dependency from .activation import Activation from .head import Head from .graphs.trial_graph import TrialGraph from .graphs.dependency_graph import DependencyConfig, DependencyFilter from .graphs.dependency_graph import PrologVisitor @proxy_class # pylint: disable=too-many-public-methods class Trial(AlchemyProxy): """Represent a trial Initialize it by passing a trial reference: trial = Trial(2) There are four visualization modes for the graph: tree: activation tree without any filters trial.graph.mode = 0 no match: tree transformed into a graph by the addition of sequence and return edges and removal of intermediate call edges trial.graph.mode = 1 exact match: calls are only combined when all the sub-call match trial.graph.mode = 2 namesapce: calls are combined without considering the sub-calls trial.graph.mode = 3 You can change the graph width and height by the variables: trial.graph.width = 600 trial.graph.height = 400 """ __tablename__ = "trial" __table_args__ = ( ForeignKeyConstraint(["inherited_id"], ["trial.id"], ondelete="RESTRICT"), ForeignKeyConstraint(["parent_id"], ["trial.id"], ondelete="SET NULL"), {"sqlite_autoincrement": True}, ) id = Column(Integer, primary_key=True) # pylint: disable=invalid-name start = Column(TIMESTAMP) finish = Column(TIMESTAMP) script = Column(Text) code_hash = Column(Text) arguments = Column(Text) command = Column(Text) inherited_id = Column(Integer, index=True) parent_id = Column(Integer, index=True) run = Column(Integer) docstring = Column(Text) inherited = one( "Trial", backref="bypass_children", viewonly=True, remote_side=[id], primaryjoin=(id == inherited_id) ) parent = one( "Trial", backref="children", viewonly=True, remote_side=[id], primaryjoin=(id == parent_id) ) function_defs = many_ref("trial", "FunctionDef") module_dependencies = many_ref("trials", "Dependency") dmodules = many_ref("trials", "Module", secondary=Dependency.t) environment_attrs = many_ref("trial", "EnvironmentAttr") activations = many_ref("trial", "Activation", order_by=Activation.m.start) file_accesses = many_viewonly_ref("trial", "FileAccess") objects = many_viewonly_ref("trial", "Object") object_values = many_viewonly_ref("trial", "ObjectValue") variables = many_viewonly_ref("trial", "Variable") variable_usages = many_viewonly_ref("trial", "VariableUsage") variable_dependencies = many_viewonly_ref("trial", "VariableDependency") tags = many_ref("trial", "Tag") bypass_children = backref_many("bypass_children") # Trial.inherited children = 
backref_many("children") # Trial.parent @query_many_property def local_modules(self): """Load local modules. Return SQLAlchemy query""" return self.modules.filter( # pylint: disable=no-member Module.m.path.like("%{}%".format(persistence_config.base_path))) @query_many_property def modules(self): """Load modules. Return SQLAlchemy query""" if self.inherited: return self.inherited.modules return self.dmodules @query_many_property def dependencies(self): """Load modules. Return SQLAlchemy query""" if self.inherited: return self.inherited.dependencies return self.module_dependencies @query_many_property def initial_activations(self): """Return initial activation as a SQLAlchemy query""" return self.activations.filter(is_none(Activation.m.caller_id)) DEFAULT = { "dependency_config.show_blackbox_dependencies": False, "dot.format": "png", "graph.width": 500, "graph.height": 500, "graph.mode": 3, "graph.use_cache": True, "prolog.use_cache": True, } REPLACE = { "dependency_config_show_blackbox_dependencies": "dependency_config.show_blackbox_dependencies", "dot_format": "dot.format", "graph_width": "graph.width", "graph_height": "graph.height", "graph_mode": "graph.mode", "graph_use_cache": "graph.use_cache", "prolog_use_cache": "prolog.use_cache", } prolog_description = PrologDescription("trial", ( PrologTrial("id"), PrologTimestamp("start"), PrologTimestamp("finish"), PrologRepr("script"), PrologRepr("code_hash"), PrologRepr("command"), PrologNullable("inherited_id", link="trial.id"), PrologNullable("parent_id", link="trial.id"), PrologAttribute("run"), PrologNullableRepr("docstring"), ), description=( "informs that a given *script* with *docstring*,\n" "and content *code_hash*,\n" "executed during a time period from *start*" "to *finish*,\n" "using noWokflow's *command*,\n" "that generated a trial *id*.\n" "This trial uses modules from *inherited_id*,\n" "is based on *parent_id*,\n" "and might be a *run* or a backup trial." 
)) def __init__(self, *args, **kwargs): if args and isinstance(args[0], relational.base): obj = args[0] trial_ref = obj.id elif args: trial_ref = kwargs.get("trial_ref", args[0]) else: trial_ref = kwargs.get("trial_ref", None) # Check if it is a new trial or a query script = kwargs.get("trial_script", None) if "use_cache" in kwargs: cache = kwargs["use_cache"] kwargs["graph_use_cache"] = kwargs.get("graph_use_cache", cache) kwargs["prolog_use_cache"] = kwargs.get("graph_use_cache", cache) session = relational.session if not trial_ref or trial_ref == -1: obj = Trial.last_trial(script=script, session=session) if "graph_use_cache" not in kwargs: kwargs["graph_use_cache"] = False if "prolog_use_cache" not in kwargs: kwargs["prolog_use_cache"] = False else: obj = Trial.load_trial(trial_ref, session=session) if obj is None: raise RuntimeError("Trial {} not found".format(trial_ref)) super(Trial, self).__init__(obj) #self._store_pk(obj) #self._restore_instance() self.dependency_config = DependencyConfig() self.dependency_filter = DependencyFilter(self) self.graph = TrialGraph(self) self.prolog = TrialProlog(self) self.dot = TrialDot(self) self.initialize_default(kwargs) self._prolog_visitor = None @property def prolog_variables(self): """Return filtered prolog variables""" if not self._prolog_visitor: self.dependency_filter.run() self._prolog_visitor = PrologVisitor(self.dependency_filter) self._prolog_visitor.visit(self.dependency_filter.main_cluster) return self._prolog_visitor @property def script_content(self): """Return the "main" script content of the trial""" return PrettyLines( content.get(self.code_hash) .decode("utf-8").split("/n")) @property def finished(self): """Check if trial has finished""" return bool(self.finish) @property def status(self): """Check trial status Possible statuses: finished, unfinished, backup""" if not self.run: return "backup" return "finished" if self.finished else "unfinished" @property def duration(self): """Calculate trial duration. Return microseconds""" if self.finish: return int((self.finish - self.start).total_seconds() * 1000000) return 0 @property def duration_text(self): """Calculate trial duration. 
Return formatted str""" if self.finish: return str(self.finish - self.start) return "None" @property def environment(self): """Return dict: environment variables -> value""" return {e.name: e.value for e in self.environment_attrs} def versioned_files(self, skip_script=False, skip_local=False, skip_access=False): """Find first files accessed in a trial Return map with relative path -> (code_hash, type) Possible types: script, module, access """ files = {} def add(path, info): """Add file to dict""" if os.path.isabs(path): if not persistence_config.base_path in path: return path = os.path.relpath(path, persistence_config.base_path) files[path] = info if not skip_script: add(self.script, {"code_hash": self.code_hash, "type": "script"}) if not skip_local: for module in self.local_modules: # pylint: disable=not-an-iterable add(module.path, { "code_hash": module.code_hash, "type": "module", "name": module.name }) if not skip_access: for faccess in reversed(list(self.file_accesses)): add(faccess.name, { "code_hash": faccess.content_hash_before, "type": "access", }) return files def iterate_accesses(self, path=None): """Iterate on all access to a path""" if not path or self.script.endswith(path): yield self.script, {"code_hash": self.code_hash, "type": "script"} for module in self.local_modules: # pylint: disable=not-an-iterable if not path or module.path.endswith(path): yield module.path, { "code_hash": module.code_hash, "type": "module", "name": module.name } for faccess in list(self.file_accesses): if not path or faccess.name.endswith(path): yield faccess.name, { "code_hash": faccess.content_hash_before, "type": "access", } yield faccess.name, { "code_hash": faccess.content_hash_after, "type": "access", } def create_head(self): """Create head for this trial""" session = relational.make_session() session.query(Head.m).filter(Head.m.script == self.script).delete() # pylint: disable=no-member session.add(Head.m(trial_id=self.id, script=self.script)) # pylint: disable=no-member, not-callable session.commit() # pylint: disable=no-member def query(self, query): """Run prolog query""" return self.prolog.query(query) def _ipython_display_(self): """Display history graph""" if hasattr(self, "graph"): # pylint: disable=protected-access return self.graph._ipython_display_() from IPython.display import display display({ 'text/plain': 'Trial {}'.format(self.id) }, raw=True) def show(self, _print=print): """Print trial information""" _print("""\ Id: {t.id} Inherited Id: {t.inherited_id} Script: {t.script} Code hash: {t.code_hash} Start: {t.start} Finish: {t.finish} Duration: {t.duration_text}\ """.format(t=self)) def __repr__(self): return "Trial({})".format(self.id) @classmethod # query def distinct_scripts(cls): """Return a set with distinct scripts""" return {s[0].rsplit("/", 1)[-1] for s in relational.session.query(distinct(cls.m.script))} @classmethod # query def reverse_trials(cls, limit, session=None): """Return a generator with <limit> trials ordered by start time desc""" session = session or relational.session return proxy_gen( session.query(cls.m) .order_by(cls.m.start.desc()) .limit(limit) ) @classmethod # query def last_trial(cls, script=None, parent_required=False, session=None): """Return last trial according to start time Keyword arguments: script -- specify the desired script (default=None) parent_required -- valid only if script exists (default=False) """ model = cls.m session = session or relational.session trial = ( session.query(model) .filter(model.start.in_( 
select([func.max(model.start)]) .where(model.script == script) )) ).first() if trial or parent_required: return trial return ( session.query(model) .filter(model.start.in_( select([func.max(model.start)]) )) ).first() @classmethod # query def find_by_name_and_time(cls, script, timestamp, trial=None, session=None): """Return the first trial according to script and timestamp Arguments: script -- specify the desired script timestamp -- specify the start of finish time of trial Keyword Arguments: trial -- limit query to a specific trial """ model = cls.m session = session or relational.session query = ( session.query(model) .filter( (model.script == script) & ( model.start.like(timestamp + "%") | model.finish.like(timestamp + "%") ) ).order_by(model.start) ) if trial: query = query.filter(model.id == trial) return proxy(query.first()) @classmethod # query def load_trial(cls, trial_ref, session=None): """Load trial by trial reference Find reference on trials id and tags name """ from .tag import Tag # avoid circular import session = session or relational.session return ( session.query(cls.m) .outerjoin(Tag.m) .filter((cls.m.id == trial_ref) | (Tag.m.name == trial_ref)) ).first() @classmethod # query def load_parent(cls, script, remove=True, parent_required=False, session=None): """Load head trial by script Keyword arguments: remove -- remove from head, after loading (default=True) parent_required -- valid only if script exists (default=False) session -- specify session for loading (default=relational.session) """ session = session or relational.session head = Head.load_head(script, session=session) if head: trial = head.trial if remove: Head.remove(head.id, session=relational.make_session()) elif not head: trial = cls.last_trial( script=script, parent_required=parent_required, session=session) return proxy(trial) @classmethod # query def fast_last_trial_id(cls, session=None): """Load last trial id that did not bypass modules Compile SQLAlchemy core query into string for optimization Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session if not hasattr(cls, "_last_trial_id"): ttrial = cls.t _query = ( select([ttrial.c.id]).where(ttrial.c.start.in_( select([func.max(ttrial.c.start)]) .select_from(ttrial) .where(is_none(ttrial.c.inherited_id)) )) ) cls.last_trial_id = str(_query) an_id = session.execute( cls.last_trial_id).fetchone() if not an_id: raise RuntimeError( "Not able to bypass modules check because no previous trial " "was found" ) return an_id[0] @classmethod # query def fast_update(cls, trial_id, finish, docstring, session=None): """Update finish time of trial Use core sqlalchemy Arguments: trial_id -- trial id finish -- finish time as a datetime object Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session ttrial = cls.t session.execute( ttrial.update() .values(finish=finish, docstring=docstring) .where(ttrial.c.id == trial_id) ) session.commit() @classmethod # query def store(cls, start, script, code_hash, arguments, bypass_modules, # pylint: disable=too-many-arguments command, run, docstring, session=None): """Create trial and assign a new id to it Use core sqlalchemy Arguments: start -- trial start time script -- script name code_hash -- script hash code arguments -- trial arguments bypass_modules -- whether it captured modules or not command -- the full command line with noWorkflow parametes run -- trial created by the run command 
Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session # ToDo: use core query parent = cls.load_parent(script, parent_required=True) parent_id = parent.id if parent else None inherited_id = None if bypass_modules: inherited_id = cls.fast_last_trial_id() ttrial = cls.__table__ result = session.execute( ttrial.insert(), {"start": start, "script": script, "code_hash": code_hash, "arguments": arguments, "command": command, "run": run, "inherited_id": inherited_id, "parent_id": parent_id, "docstring": docstring}) tid = result.lastrowid session.commit() return tid @classmethod # query def all(cls, session=None): """Return all trials Keyword arguments: session -- specify session for loading (default=relational.session) """ session = session or relational.session return proxy_gen(session.query(cls.m)) def match_status(self, status): """Check if trial statuses matches """ if status == "*": return True return self.status == status def match_script(self, script): """Check if trial scripts matches """ if script == "*": return True return self.script == script @property def str_start(self): """Return start date as string""" return str(self.start) @property def str_finish(self): """Return start date as string""" return str(self.finish) @classmethod # query def count(cls, session=None): """Count number of trials on database """ session = session or relational.session return session.query(cls.m).count()
capture/noworkflow/now/persistence/models/trial.py
20,967
Represent a trial Initialize it by passing a trial reference: trial = Trial(2) There are four visualization modes for the graph: tree: activation tree without any filters trial.graph.mode = 0 no match: tree transformed into a graph by the addition of sequence and return edges and removal of intermediate call edges trial.graph.mode = 1 exact match: calls are only combined when all the sub-call match trial.graph.mode = 2 namesapce: calls are combined without considering the sub-calls trial.graph.mode = 3 You can change the graph width and height by the variables: trial.graph.width = 600 trial.graph.height = 400 Display history graph Add file to dict Return all trials Keyword arguments: session -- specify session for loading (default=relational.session) Count number of trials on database Create head for this trial Load modules. Return SQLAlchemy query Return a set with distinct scripts Calculate trial duration. Return microseconds Calculate trial duration. Return formatted str Return dict: environment variables -> value Load last trial id that did not bypass modules Compile SQLAlchemy core query into string for optimization Keyword arguments: session -- specify session for loading (default=relational.session) Update finish time of trial Use core sqlalchemy Arguments: trial_id -- trial id finish -- finish time as a datetime object Keyword arguments: session -- specify session for loading (default=relational.session) Return the first trial according to script and timestamp Arguments: script -- specify the desired script timestamp -- specify the start of finish time of trial Keyword Arguments: trial -- limit query to a specific trial Check if trial has finished Return initial activation as a SQLAlchemy query Iterate on all access to a path Return last trial according to start time Keyword arguments: script -- specify the desired script (default=None) parent_required -- valid only if script exists (default=False) Load head trial by script Keyword arguments: remove -- remove from head, after loading (default=True) parent_required -- valid only if script exists (default=False) session -- specify session for loading (default=relational.session) Load trial by trial reference Find reference on trials id and tags name Load local modules. Return SQLAlchemy query Check if trial scripts matches Check if trial statuses matches Load modules. Return SQLAlchemy query Return filtered prolog variables Run prolog query Return a generator with <limit> trials ordered by start time desc Return the "main" script content of the trial Print trial information Check trial status Possible statuses: finished, unfinished, backup Create trial and assign a new id to it Use core sqlalchemy Arguments: start -- trial start time script -- script name code_hash -- script hash code arguments -- trial arguments bypass_modules -- whether it captured modules or not command -- the full command line with noWorkflow parametes run -- trial created by the run command Keyword arguments: session -- specify session for loading (default=relational.session) Return start date as string Return start date as string Find first files accessed in a trial Return map with relative path -> (code_hash, type) Possible types: script, module, access Trial Model Copyright (c) 2016 Universidade Federal Fluminense (UFF) Copyright (c) 2016 Polytechnic Institute of New York University. This file is part of noWorkflow. Please, consult the license terms in the LICENSE file. 
pylint: disable=too-many-public-methods pylint: disable=invalid-name Trial.inherited Trial.parent pylint: disable=no-member Check if it is a new trial or a queryself._store_pk(obj)self._restore_instance() pylint: disable=not-an-iterable pylint: disable=not-an-iterable pylint: disable=no-member pylint: disable=no-member, not-callable pylint: disable=no-member pylint: disable=protected-access query query query query query avoid circular import query query query query pylint: disable=too-many-arguments ToDo: use core query query query
4,126
en
0.602481
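The Trial docstring above documents the intended interactive use of the class. A short sketch of that API, assuming a noWorkflow database with at least two trials exists and that Trial is importable from the package path the file sits in (capture/noworkflow/now/persistence/models); the trial id and graph sizes are illustrative.

from noworkflow.now.persistence.models import Trial   # import path assumed from the repo layout

trial = Trial(2)          # load by trial id (tag names also work, per load_trial)
trial.graph.mode = 2      # exact match: calls combined only when all sub-calls match
trial.graph.width = 600
trial.graph.height = 400
print(trial.status, trial.duration_text)
trial.show()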
import Tkinter as tk class Combobox(tk.Label): def __init__(self, master, choices=[], default=None, direction="down", arrowimage="default", **kwargs): style = {"relief": "groove", "bg":"white"} style.update(kwargs) tk.Label.__init__(self, master, **style) # options if direction not in ("down","up"): raise Exception("Direction must be either down or up") self.direction = direction self.choices = choices # entry self.entry = tk.Entry(self, bg=style["bg"], borderwidth=0) self.entry.pack(side="left", fill="y") if default != None: self.entry.insert(0, default) # dropdown arrow if arrowimage == "default": arrowimage = tk.PhotoImage(file="dropdown.gif") else: pass # image should be passed as a Photoimage self.arrow = tk.Label(self, bg=style["bg"], image=arrowimage) self.arrow.img = arrowimage self.arrow.pack(side="right") self.arrow.bind("<Button-1>", self.dropdown) def dropdown(self, event=None): self.arrow["relief"] = "sunken" self.entry.focus_force() self.entry.select_range(0, tk.END) menu = tk.Menu(self.entry, tearoff=0, bg="white") def changeentry(choice): self.entry.delete(0, tk.END) self.entry.insert(0, choice) self.rollup() if self.direction == "down": choices = self.choices elif self.direction == "up": choices = list(reversed(self.choices)) for choice in choices: menu.add_command(label=repr(choice).ljust(30), command=lambda x=choice: changeentry(x)) x = self.entry.winfo_rootx() if self.direction == "down": y = self.entry.winfo_rooty() + self.entry.winfo_height() elif self.direction == "up": y = self.entry.winfo_rooty() - menu.yposition(0) #menu.winfo_height() menu.post(x, y) def rollup(self, event=None): self.arrow["relief"] = "flat" if __name__ == "__main__": win = tk.Tk() OPTIONS = range(20) cbox = Combobox(win, choices=OPTIONS, default=12, direction="down") cbox.pack(side="left") cbox2 = Combobox(win, choices=OPTIONS, default=24, direction="up") cbox2.pack(side="left") win.mainloop()
dependencies/generate maps/pythongis/app/tk2/_othermisc/dropdown_works.py
2,337
options entry dropdown arrow image should be passed as a Photoimagemenu.winfo_height()
86
en
0.565041
#------------------Bombermans Team---------------------------------# # Author : B3mB4m # Concat : b3mb4m@protonmail.com # Project : https://github.com/b3mb4m/Shellsploit # LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE #------------------------------------------------------------------# import sys import os from .core.color import * from re import findall from .core.Comp import tab from lib.base.framework import ShellsploitFramework if sys.version_info.major >= 3: raw_input = input class B3mB4m(ShellsploitFramework): def __init__(self): ShellsploitFramework.__init__(self) self.argvlist = ["None", "None", "None", "None"] self.disassembly = "None" self.mycache = "None" def control(self, string): bash = bcolors.OKBLUE + bcolors.UNDERLINE + "ssf" + bcolors.ENDC bash += ":" bash += bcolors.RED + string + bcolors.ENDC bash += bcolors.OKBLUE + " > " + bcolors.ENDC try: terminal = raw_input(bash) except KeyboardInterrupt: B3mB4m.exit("\n[*] (Ctrl + C ) Detected, Trying To Exit ...") # Injectors if string[:9] == "injectors": tab.completion("injectors") if terminal[:4] == "help": from .core.help import injectorhelp injectorhelp() self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass # elif terminal[:9] == "need help": # import XX # print youtubelink for this module elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:4] == "pids": B3mB4m.pids("wholelist") self.control(string) elif terminal[:6] == "getpid": B3mB4m.pids(None, terminal[7:]) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:5] == "unset": if string in B3mB4m.bfdlist(): if terminal[6:] == "exe" or terminal[6:] == "file": self.argvlist[0] = "None" elif terminal[6:] == "host": self.argvlist[1] = "None" elif terminal[6:] == "port": self.argvlist[2] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/tLsInjectorDLL": if terminal[6:] == "exe": self.argvlist[0] = "None" elif terminal[6:] == "dll": self.argvlist[1] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/CodecaveInjector": if terminal[6:] == "exe": self.argvlist[0] = "None" elif terminal[6:] == "shellcode": self.argvlist[1] = "None" else: if terminal[6:] == "pid": self.argvlist[0] = "None" elif terminal[6:] == "shellcode": self.argvlist[1] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:3] == "set": if string in B3mB4m.bfdlist(): if terminal[4:7] == "exe" or terminal[4:8] == "file": self.argvlist[0] = terminal[9:] elif terminal[4:8] == "host": self.argvlist[1] = terminal[9:] elif terminal[4:8] == "port": self.argvlist[2] = terminal[9:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/tLsInjectorDLL": if terminal[4:7] == "exe": self.argvlist[0] = terminal[8:] elif terminal[4:7] == "dll": self.argvlist[1] = terminal[8:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) elif string == "injectors/Windows/x86/CodecaveInjector": if terminal[4:7] == "exe": self.argvlist[0] = terminal[8:] elif terminal[4:13] == 
"shellcode": self.argvlist[1] = terminal[14:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) else: if terminal[4:7] == "pid": self.argvlist[0] = terminal[8:] elif terminal[4:13] == "shellcode": if ".txt" in terminal[14:]: if os.path.isfile(terminal[14:]): with open(terminal[14:], "r") as shellcode: cache = shellcode.readlines() db = "" for x in database: db += x.strip().replace('"', "").replace('+', "").strip() self.argvlist[1] = db else: print(bcolors.RED + bcolors.BOLD + "\nFile can't find, please try with full path.\n" + bcolors.ENDC) self.control(string) else: self.argvlist[1] = terminal[14:] else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:14] == "show shellcode": if string in B3mB4m.bfdlist(): print("This option not available for this module.") self.control(string) elif string == "injectors/Windowsx86/tLsInjectorDLL": self.control(string) else: if self.argvlist[1] != "None": B3mB4m.prettyout(self.argvlist[1]) else: print("\nYou must set shellcode before this ..\b") self.control(string) elif terminal[:12] == "show options": from .core.Injectoroptions import controlset if string in B3mB4m.bfdlist(): controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) self.control(string) else: if string != "injectors/Windows/x86/tLsInjectorDLL": if self.argvlist[1] != "None": self.mycache = "process" controlset(string, self.argvlist[0], self.mycache) self.control(string) controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:6] == "inject": if self.argvlist[0] == None or self.argvlist[1] == None: print("\nYou must set pid/shellcode before inject !\n") self.control(string) if string == "injectors/Linux86/ptrace": from .inject.menager import linux86ptrace linux86ptrace(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Linux64/ptrace": from .inject.menager import linux64ptrace linux64ptrace(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/byteman": from .inject.menager import windows windows(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/x86/tLsInjectorDLL": from .inject.menager import winx86tLsDLL winx86tLsDLL(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/x86/CodecaveInjector": from .inject.menager import winx86Codecave winx86Codecave(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/Dllinjector": from .inject.menager import winDLL winDLL(self.argvlist[0], self.argvlist[1]) elif string == "injectors/Windows/BFD/Patching": from .inject.menager import winBFD winBFD(self.argvlist[0], self.argvlist[1], int(self.argvlist[2])) # elif string == "injectors/MacOSX/BFD/Patching": # from .inject.menager import MacBFD # MacBFD( FILE, HOST, PORT) # elif string == "injectors/Linux/BFD/Patching": # from .inject.menager import LinuxBFD # LinuxBFD( FILE, HOST, PORT) # elif string == "injectors/Linux/ARM/x86/BFD/Patching": # from .inject.menager import LinuxARMx86BFD # LinuxARMx86BFD( FILE, HOST, PORT) # elif string == "FreeBSD/x86/BFD/Patching": # from .inject.menager import FreeBSDx86 # FreeBSDx86( FILE, HOST, PORT) self.control(string) # elif terminal[:7] == "extract": # Future option # Make it 
executable (Dynamic virus land) # from bla bla import executable # generator() elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) # Backdoors elif string[:9] == "backdoors": tab.completion("backdoors") if terminal[:4] == "help": from .core.help import backdoorshelp backdoorshelp() self.control(string) elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:12] == "show options": from .core.SHELLoptions import controlset controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif terminal[:5] == "unset": if terminal[6:] == "lhost": self.argvlist[0] = "None" elif terminal[6:] == "lport": self.argvlist[1] = "None" # elif terminal[6:] == "encoder": # self.argvlist[2] = "None" else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) elif terminal[:3] == "set": if terminal[4:9].lower() == "lhost": self.argvlist[0] = terminal[10:] elif terminal[4:9].lower() == "lport": self.argvlist[1] = terminal[10:] # elif terminal[4:11].lower() == "encoder" # self.argvlist[2] = terminal[11:] else: print(bcolors.RED + bcolors.BOLD + "This option is not available." + bcolors.ENDC) self.control(string) elif terminal[:8] == "generate": from .Session.generator import process # Custom output path will be add .. if self.argvlist[0] == "None" or self.argvlist[1] == "None": print("\nSet options before generate payload.\n") self.control(string) else: process(data=string, HOST=self.argvlist[0], PORT=self.argvlist[1], ENCODER=False, logger=True) self.control(string) elif terminal[:5] == "clear": B3mB4m.clean() self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass else: if not terminal: self.control(string) else: print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC) self.control(string) # Shellcodes else: tab.completion("shellcodes") if terminal[:4] == "help": # if terminal[5:11] == "output": # from Outputs.exehelp import help # print help() # self.control( string) from .core.help import shellcodehelp shellcodehelp() self.control(string) elif terminal[:2] == "os": B3mB4m.oscommand(terminal[3:]) self.control(string) elif terminal[:4] == "back": self.argvlist = ["None", "None", "None", "None"] pass elif terminal[:4] == "exit": B3mB4m.exit("\nThanks for using shellsploit !\n") elif terminal[:10] == "whatisthis": from .core.whatisthis import whatisthis if "egg" in string: message = "Egg-hunt" elif "tcp" in string or "reverse" in string or "netcat" in string: message = "Remote" elif "download" in string: message = "Download and execute" else: message = "Local" # Add special part for particul whatisthis(message) self.control(string) elif terminal[:5] == "unset": if terminal[6:] == "encoder": self.argvlist[0] = "None" elif terminal[6:] == "iteration": self.argvlist[1] = "None" elif terminal[6:] == "file": if string in B3mB4m.readlist(): self.argvlist[2] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "port": if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist(): self.argvlist[2] = "None" else: Base.invalidcommand() elif terminal[6:] == "command": if string in B3mB4m.execlist(): self.argvlist[2] = "None" else: 
B3mB4m.invalidcommand() elif terminal[6:] == "link": if string in B3mB4m.downloadandexecutelist(): self.argvlist[2] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "filename": if string in B3mB4m.downloadandexecutelist(): self.argvlist[3] = "None" else: B3mB4m.invalidcommand() elif terminal[6:] == "host": if string in B3mB4m.reversetcplist(): self.argvlist[3] = "None" else: B3mB4m.invalidcommand() else: B3mB4m.invalidcommand() self.control(string) elif terminal[:3] == "set": if terminal[4:8] == "file": if string in B3mB4m.readlist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "port": if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "command": if string in B3mB4m.execlist(): self.argvlist[2] = terminal[12:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "link": if string in B3mB4m.downloadandexecutelist(): self.argvlist[2] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "message": if string in B3mB4m.messageboxlist(): self.argvlist[2] = terminal[12:] else: B3mB4m.invalidcommand() elif terminal[4:8] == "host": if string in B3mB4m.reversetcplist(): self.argvlist[3] = terminal[9:] else: B3mB4m.invalidcommand() elif terminal[4:12] == "filename": if string in B3mB4m.downloadandexecutelist(): self.argvlist[3] = terminal[13:] else: B3mB4m.invalidcommand() elif terminal[4:11] == "encoder": from .core.lists import encoders if terminal[12:] not in encoders(): print("This encoder not in list !") self.control(string) self.argvlist[0] = terminal[12:] elif terminal[4:13] == "iteration": self.argvlist[1] = terminal[14:] else: B3mB4m.invalidcommand() self.control(string) elif terminal[:12] == "show options": from .core.SHELLoptions import controlset if string[:7] == "linux86": if string == "linux86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/chmod": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/download&exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux86/exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:10] == "solarisx86": if string == "solarisx86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "solarisx86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "solarisx86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:7] == "linux64": if string == "linux64/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/mkdir": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], 
self.argvlist[1], self.argvlist[0]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "linux": if string == "linux/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "linux/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "osx86": if string == "osx86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "osx86/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:5] == "osx64": if string == "osx64/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "osx64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:11] == "freebsd_x86": if string == "freebsd_x86/reverse_tcp2": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/reverse_tcp": controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/read": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/exec": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x86/tcp_bind": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:11] == "freebsd_x64": if string == "freebsd_x64/tcp_bind": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3]) elif string == "freebsd_x64/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) elif string == "freebsd_x64/exec": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:9] == "linux_arm": if string == "linux_arm/chmod": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_arm/exec": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_arm/reverse_tcp": controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:10] == "linux_mips": if string == "linux_mips/chmod": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) elif string == "linux_mips/reverse_tcp": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3]) elif string == "linux_mips/tcp_bind": controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2]) else: controlset(string, self.argvlist[0], self.argvlist[1]) self.control(string) elif string[:7] == "windows": if string == "windows/messagebox": controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1]) elif string == "windows/exec": controlset(string, 
                       self.argvlist[1], self.argvlist[0], self.argvlist[2])
            elif string == "windows/download&execute":
                controlset(string, self.argvlist[0], self.argvlist[1],
                           self.argvlist[2], self.argvlist[3])
            elif string == "windows/reverse_tcp":
                controlset(string, self.argvlist[2], self.argvlist[3],
                           self.argvlist[0], self.argvlist[1])
            elif string == "windows/tcp_bind":
                controlset(string, self.argvlist[2],
                           self.argvlist[0], self.argvlist[1])
            self.control(string)

        elif terminal[:8] == "generate":
            from .database.generator import generator
            if string[:7] == "linux86":
                if string == "linux86/binsh_spawn":
                    self.disassembly = generator("linux86", "binsh_spawn")
                elif string == "linux86/read":
                    if self.argvlist[2] == "None":
                        print("\nFile name must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux86", "read", FILE=self.argvlist[2])
                elif string == "linux86/exec":
                    if self.argvlist[2] == "None":
                        print("\nCommand must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux86", "exec", COMMAND=self.argvlist[2])
                elif string == "linux86/download&exec":
                    if self.argvlist[2] == "None":
                        print("\nLink must be declared.\n")
                        self.control(string)
                    elif "/" not in self.argvlist[2]:
                        print("\nWrong URL format, example: 127.0.0.1/X\n")
                        self.control(string)
                    elif len(self.argvlist[2].split("/")[-1]) != 1:
                        print("\nYour filename must be one character long.\n")
                        self.control(string)
                    # Strip the scheme before the URL is embedded in the shellcode.
                    # Note: the original tested `"www." in self.argvlist` (the list,
                    # not the URL string), which was a bug.
                    if ("http" in self.argvlist[2] or "https" in self.argvlist[2]
                            or "www." in self.argvlist[2]):
                        try:
                            self.argvlist[2] = (self.argvlist[2]
                                                .replace("http://", "")
                                                .replace("https://", "")
                                                .replace("www.", ""))
                        except Exception:
                            pass
                    self.disassembly = generator("linux86", "download&exec", URL=self.argvlist[2])
                elif string == "linux86/chmod":
                    if self.argvlist[2] == "None":
                        print("\nFile name must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux86", "chmod", FILE=self.argvlist[2])
                elif string == "linux86/tcp_bind":
                    if self.argvlist[2] == "None":
                        print("\nPORT must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux86", "tcp_bind", port=self.argvlist[2])
                elif string == "linux86/reverse_tcp":
                    if self.argvlist[2] == "None" or self.argvlist[3] == "None":
                        print("\nHost&Port must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux86", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
            elif string[:7] == "linux64":
                if string == "linux64/binsh_spawn":
                    self.disassembly = generator("linux64", "binsh_spawn")
                elif string == "linux64/tcp_bind":
                    self.disassembly = generator("linux64", "tcp_bind", port=self.argvlist[2])
                elif string == "linux64/reverse_tcp":
                    self.disassembly = generator("linux64", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "linux64/read":
                    self.disassembly = generator("linux64", "read", FILE=self.argvlist[2])
            # Was a bare `if` in the original, which re-tested the linux86/linux64
            # payloads as well; `elif` matches the intent of the dispatch chain.
            elif string[:5] == "linux":
                if string == "linux/read":
                    if self.argvlist[2] == "None":
                        print("\nFile name must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("linux", "read", FILE=self.argvlist[2])
                elif string == "linux/binsh_spawn":
                    self.disassembly = generator("linux", "binsh_spawn")
                elif string == "linux/tcp_bind":
                    self.disassembly = generator("linux", "tcp_bind", port=self.argvlist[2])
                elif string == "linux/reverse_tcp":
                    self.disassembly = generator("linux", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
            elif string[:5] == "osx86":
                if string == "osx86/tcp_bind":
                    self.disassembly = generator("osx86", "tcp_bind", port=self.argvlist[2])
                elif string == "osx86/binsh_spawn":
                    self.disassembly = generator("osx86", "binsh_spawn")
                elif string == "osx86/reverse_tcp":
                    self.disassembly = generator("osx86", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
            elif string[:5] == "osx64":
                if string == "osx64/binsh_spawn":
                    self.disassembly = generator("osx64", "binsh_spawn")
                elif string == "osx64/tcp_bind":
                    self.disassembly = generator("osx64", "tcp_bind", port=self.argvlist[2])
                elif string == "osx64/reverse_tcp":
                    self.disassembly = generator("osx64", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
            elif string[:11] == "freebsd_x86":
                if string == "freebsd_x86/binsh_spawn":
                    self.disassembly = generator("freebsdx86", "binsh_spawn")
                elif string == "freebsd_x86/read":
                    self.disassembly = generator("freebsdx86", "read", FILE=self.argvlist[2])
                elif string == "freebsd_x86/reverse_tcp":
                    self.disassembly = generator("freebsdx86", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "freebsd_x86/reverse_tcp2":
                    self.disassembly = generator("freebsdx86", "reverse_tcp2",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "freebsd_x86/exec":
                    self.disassembly = generator("freebsdx86", "exec", COMMAND=self.argvlist[2])
                elif string == "freebsd_x86/tcp_bind":
                    self.disassembly = generator("freebsdx86", "tcp_bind", port=self.argvlist[2])
            elif string[:11] == "freebsd_x64":
                if string == "freebsd_x64/binsh_spawn":
                    self.disassembly = generator("freebsdx64", "binsh_spawn")
                elif string == "freebsd_x64/tcp_bind":
                    self.disassembly = generator("freebsdx64", "tcp_bind",
                                                 port=self.argvlist[2], PASSWORD=self.argvlist[3])
                elif string == "freebsd_x64/reverse_tcp":
                    self.disassembly = generator("freebsdx64", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "freebsd_x64/exec":
                    self.disassembly = generator("freebsdx64", "exec", COMMAND=self.argvlist[2])
            elif string[:9] == "linux_arm":
                if string == "linux_arm/chmod":
                    self.disassembly = generator("linux_arm", "chmod", FILE=self.argvlist[2])
                elif string == "linux_arm/binsh_spawn":
                    self.disassembly = generator("linux_arm", "binsh_spawn")
                elif string == "linux_arm/reverse_tcp":
                    self.disassembly = generator("linux_arm", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "linux_arm/exec":
                    self.disassembly = generator("linux_arm", "exec", COMMAND=self.argvlist[2])
            elif string[:10] == "linux_mips":
                if string == "linux_mips/reverse_tcp":
                    self.disassembly = generator("linux_mips", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "linux_mips/binsh_spawn":
                    self.disassembly = generator("linux_mips", "binsh_spawn")
                elif string == "linux_mips/chmod":
                    self.disassembly = generator("linux_mips", "chmod", FILE=self.argvlist[2])
                elif string == "linux_mips/tcp_bind":
                    self.disassembly = generator("linux_mips", "tcp_bind", port=self.argvlist[2])
            elif string[:7] == "windows":
                if string == "windows/messagebox":
                    self.disassembly = generator("windows", "messagebox", MESSAGE=self.argvlist[2])
                elif string == "windows/download&execute":
                    # "downloandandexecute" is the (misspelled) key used by the
                    # shellcode database, so it is kept as-is here.
                    self.disassembly = generator("windows", "downloandandexecute",
                                                 URL=self.argvlist[2], FILENAME=self.argvlist[3])
                elif string == "windows/exec":
                    self.disassembly = generator("windows", "exec", COMMAND=self.argvlist[2])
                elif string == "windows/reverse_tcp":
                    self.disassembly = generator("windows", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "windows/tcp_bind":
                    self.disassembly = generator("windows", "tcp_bind", port=self.argvlist[2])
            elif string[:10] == "solarisx86":
                if string == "solarisx86/binsh_spawn":
                    self.disassembly = generator("solarisx86", "binsh_spawn")
                elif string == "solarisx86/read":
                    if self.argvlist[2] == "None":
                        print("\nFile name must be declared.\n")
                        self.control(string)
                    self.disassembly = generator("solarisx86", "read", FILE=self.argvlist[2])
                elif string == "solarisx86/reverse_tcp":
                    self.disassembly = generator("solarisx86", "reverse_tcp",
                                                 ip=self.argvlist[3], port=self.argvlist[2])
                elif string == "solarisx86/tcp_bind":
                    self.disassembly = generator("solarisx86", "tcp_bind", port=self.argvlist[2])

            if self.argvlist[0] == "x86/xor_b3m":
                from .encoders.shellcode.xor_b3m import prestart
                if self.argvlist[1] == "None":
                    self.argvlist[1] = 1
                elif self.argvlist[1] == 0:
                    self.argvlist[1] = 1
                self.disassembly = prestart(self.disassembly.replace("\\x", ""),
                                            int(self.argvlist[1]))
            elif self.argvlist[0] == "x86/xor":
                from .encoders.shellcode.xor import prestart
                if self.argvlist[1] == "None":
                    self.argvlist[1] = 1
                elif self.argvlist[1] == 0:
                    self.argvlist[1] = 1
                self.disassembly = prestart(self.disassembly.replace("\\x", ""),
                                            int(self.argvlist[1]))
            else:
                self.disassembly = self.disassembly
            # print("\n" + "Shellcode length : %d"
            #       % len(str(bytearray(self.disassembly.replace("\\x", "").decode("hex")))))
            B3mB4m.prettyout(self.disassembly)
            self.control(string)

        elif terminal[:6] == "output":
            if self.disassembly == "None":
                print("Please generate shellcode before saving it.")
                self.control(string)
            # Not sure about this option yet: should it be taken from params
            # or read directly from input? ..
            if terminal[7:10].lower() == "exe":
                # Missing parts will be added ..
                if "linux86" in terminal.lower():
                    OS = "linux86"
                elif "linux64" in terminal.lower():
                    OS = "linux64"
                elif "windows" in terminal.lower():
                    OS = "windows"
                elif "freebsdx86" in terminal.lower():
                    OS = "freebsdx86"
                elif "freebsdx64" in terminal.lower():
                    OS = "freebsdx64"
                elif "openbsdx86" in terminal.lower():
                    OS = "openbsdx86"
                elif "solarisx86" in terminal.lower():
                    OS = "solarisx86"
                elif "linuxpowerpc" in terminal.lower():
                    OS = "linuxpowerpc"
                elif "openbsdpowerpc" in terminal.lower():
                    OS = "openbsdpowerpc"
                elif "linuxsparc" in terminal.lower():
                    OS = "linuxsparc"
                elif "freebsdsparc" in terminal.lower():
                    OS = "freebsdsparc"
                elif "openbsdsparc" in terminal.lower():
                    OS = "openbsdsparc"
                elif "solarissparc" in terminal.lower():
                    OS = "solarissparc"
                elif "linuxarm" in terminal.lower():
                    OS = "linuxarm"
                elif "freebsdarm" in terminal.lower():
                    OS = "freebsdarm"
                elif "openbsdarm" in terminal.lower():
                    OS = "openbsdarm"
                else:
                    OS = None
                from .Outputs.exe import ExeFile
                ExeFile(self.disassembly, OS)
                self.control(string)
            elif terminal[7:10].lower() == "c++" or terminal[7:10].lower() == "cpp":
                from .Outputs.Cplusplus import CplusplusFile
                if "windows" in string:
                    CplusplusFile(self.disassembly, True)
                else:
                    CplusplusFile(self.disassembly)
            elif terminal[7:8].lower() == "c":
                if "windows" in string:
                    from .Outputs.Cplusplus import CplusplusFile
                    CplusplusFile(self.disassembly, True)
                else:
                    from .Outputs.C import CFile
                    CFile(self.disassembly)
            elif terminal[7:9].lower() == "py" or terminal[7:13].lower() == "python":
                from .Outputs.python import PyFile
                PyFile(self.disassembly)
            elif terminal[7:10].lower() == "txt":
                from .Outputs.txt import TxtFile
                TxtFile(self.disassembly)
            else:
                print(bcolors.RED + bcolors.BOLD +
                      "[-] Unknown output type: {0}".format(terminal) + bcolors.ENDC)
            self.control(string)

        elif terminal[:5] == "clear":
            B3mB4m.clean()
            self.control(string)
        elif terminal[:2].lower() == "ip":
            B3mB4m.IP()
            self.control(string)
        elif terminal[:13] == "show encoders":
            from .core.lists import encoderlist
            encoderlist()
            self.control(string)
        elif terminal[:5] == "disas":
            B3mB4m().startdisas(self.disassembly, string)
            self.control(string)
        else:
            if not terminal:
                self.control(string)
            else:
                print(bcolors.RED + bcolors.BOLD +
                      "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                self.control(string)
shell/control.py
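The generate branch above is one long if/elif ladder that maps a payload name onto a generator(...) call with hand-picked argvlist positions. A table-driven dispatch expresses the same mapping more compactly; the sketch below is illustrative only (PAYLOADS and generate_payload are hypothetical names, not part of shellsploit; generator is the real function imported above):

# Hypothetical refactor sketch, not shellsploit API: map each payload name to
# its database key plus a kwargs builder over argvlist.
PAYLOADS = {
    "linux86/reverse_tcp": ("linux86", "reverse_tcp",
                            lambda argv: {"ip": argv[3], "port": argv[2]}),
    "linux86/chmod": ("linux86", "chmod", lambda argv: {"FILE": argv[2]}),
    "windows/exec": ("windows", "exec", lambda argv: {"COMMAND": argv[2]}),
}

def generate_payload(generator, string, argvlist):
    arch, name, build_kwargs = PAYLOADS[string]
    return generator(arch, name, **build_kwargs(argvlist))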
import numpy as np
from .State import State
from .Action import Action

'''
Includes blood glucose level proxy for diabetes: 0-4
    (lo2, lo1, normal, hi1, hi2); any level other than normal is "abnormal"
Initial distribution:
    [.05, .15, .6, .15, .05] for non-diabetics and [.01, .05, .15, .6, .19] for diabetics

Effect of vasopressors on if diabetic:
    raise blood pressure: normal -> hi w.p. .9, lo -> normal w.p. .5, lo -> hi w.p. .4
    raise blood glucose by 1 w.p. .5

Effect of vasopressors off if diabetic:
    blood pressure falls by 1 w.p. .05 instead of .1
    glucose does not fall - apply fluctuations below instead

Fluctuation in blood glucose levels (IV/insulin therapy are not possible actions):
    fluctuate w.p. .3 if diabetic
    fluctuate w.p. .1 if non-diabetic
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4530321/

Additional fluctuation regardless of other changes

This order is applied: antibiotics, ventilation, vasopressors, fluctuations
'''


class MDP(object):
    def __init__(self, init_state_idx=None, init_state_idx_type='obs',
                 policy_array=None, policy_idx_type='obs', p_diabetes=0.2):
        '''
        initialize the simulator
        '''
        assert 0 <= p_diabetes <= 1, \
            "Invalid p_diabetes: {}".format(p_diabetes)
        assert policy_idx_type in ['obs', 'full', 'proj_obs']

        # Check the policy dimensions (states x actions)
        if policy_array is not None:
            assert policy_array.shape[1] == Action.NUM_ACTIONS_TOTAL
            if policy_idx_type == 'obs':
                assert policy_array.shape[0] == State.NUM_OBS_STATES
            elif policy_idx_type == 'full':
                assert policy_array.shape[0] == \
                    State.NUM_HID_STATES * State.NUM_OBS_STATES
            elif policy_idx_type == 'proj_obs':
                assert policy_array.shape[0] == State.NUM_PROJ_OBS_STATES

        # p_diabetes is used to generate random state if init_state is None
        self.p_diabetes = p_diabetes
        self.state = None

        # Only need to use init_state_idx_type if you are providing a state_idx!
        self.state = self.get_new_state(init_state_idx, init_state_idx_type)

        self.policy_array = policy_array
        self.policy_idx_type = policy_idx_type  # Used for mapping the policy to actions

    def get_new_state(self, state_idx=None, idx_type='obs', diabetic_idx=None):
        '''
        use to start MDP over.  A few options:

        Full specification:
        1. Provide state_idx with idx_type = 'obs' + diabetic_idx
        2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored
        3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*

        * This option will set glucose to a normal level

        Random specification
        4. State_idx, no diabetic_idx: Latter will be generated
        5. No state_idx, no diabetic_idx: Completely random
        6. No state_idx, diabetic_idx given: Random conditional on diabetes
        '''
        assert idx_type in ['obs', 'full', 'proj_obs']
        option = None
        if state_idx is not None:
            if idx_type == 'obs' and diabetic_idx is not None:
                option = 'spec_obs'
            elif idx_type == 'obs' and diabetic_idx is None:
                option = 'spec_obs_no_diab'
                diabetic_idx = np.random.binomial(1, self.p_diabetes)
            elif idx_type == 'full':
                option = 'spec_full'
            elif idx_type == 'proj_obs' and diabetic_idx is not None:
                option = 'spec_proj_obs'
        elif state_idx is None and diabetic_idx is None:
            option = 'random'
        elif state_idx is None and diabetic_idx is not None:
            option = 'random_cond_diab'

        assert option is not None, "Invalid specification of new state"

        if option in ['random', 'random_cond_diab']:
            init_state = self.generate_random_state(diabetic_idx)
            # Do not start in death or discharge state
            while init_state.check_absorbing_state():
                init_state = self.generate_random_state(diabetic_idx)
        else:
            # Note that diabetic_idx will be ignored if idx_type = 'full'
            init_state = State(
                state_idx=state_idx, idx_type=idx_type, diabetic_idx=diabetic_idx)

        return init_state

    def generate_random_state(self, diabetic_idx=None):
        # Note that we will condition on diabetic idx if provided
        if diabetic_idx is None:
            diabetic_idx = np.random.binomial(1, self.p_diabetes)

        # hr and sys_bp w.p. [.25, .5, .25]
        hr_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))
        sysbp_state = np.random.choice(np.arange(3), p=np.array([.25, .5, .25]))
        # percoxyg w.p. [.2, .8]
        percoxyg_state = np.random.choice(np.arange(2), p=np.array([.2, .8]))

        if diabetic_idx == 0:
            glucose_state = np.random.choice(np.arange(5),
                                             p=np.array([.05, .15, .6, .15, .05]))
        else:
            glucose_state = np.random.choice(np.arange(5),
                                             p=np.array([.01, .05, .15, .6, .19]))

        antibiotic_state = 0
        vaso_state = 0
        vent_state = 0

        state_categs = [hr_state, sysbp_state, percoxyg_state, glucose_state,
                        antibiotic_state, vaso_state, vent_state]

        return State(state_categs=state_categs, diabetic_idx=diabetic_idx)

    def transition_antibiotics_on(self):
        '''
        antibiotics state on
        heart rate, sys bp: hi -> normal w.p. .5
        '''
        self.state.antibiotic_state = 1
        if self.state.hr_state == 2 and np.random.uniform(0, 1) < 0.5:
            self.state.hr_state = 1
        if self.state.sysbp_state == 2 and np.random.uniform(0, 1) < 0.5:
            self.state.sysbp_state = 1

    def transition_antibiotics_off(self):
        '''
        antibiotics state off
        if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1
        '''
        if self.state.antibiotic_state == 1:
            if self.state.hr_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.hr_state = 2
            if self.state.sysbp_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.sysbp_state = 2
            self.state.antibiotic_state = 0

    def transition_vent_on(self):
        '''
        ventilation state on
        percent oxygen: low -> normal w.p. .7
        '''
        self.state.vent_state = 1
        if self.state.percoxyg_state == 0 and np.random.uniform(0, 1) < 0.7:
            self.state.percoxyg_state = 1

    def transition_vent_off(self):
        '''
        ventilation state off
        if ventilation was on: percent oxygen: normal -> lo w.p. .1
        '''
        if self.state.vent_state == 1:
            if self.state.percoxyg_state == 1 and np.random.uniform(0, 1) < 0.1:
                self.state.percoxyg_state = 0
            self.state.vent_state = 0

    def transition_vaso_on(self):
        '''
        vasopressor state on
        for non-diabetic:
            sys bp: low -> normal, normal -> hi w.p. .7
        for diabetic:
            raise blood pressure: normal -> hi w.p. .9,
                lo -> normal w.p. .5, lo -> hi w.p. .4
            raise blood glucose by 1 w.p. .5
        '''
        self.state.vaso_state = 1
        if self.state.diabetic_idx == 0:
            if np.random.uniform(0, 1) < 0.7:
                if self.state.sysbp_state == 0:
                    self.state.sysbp_state = 1
                elif self.state.sysbp_state == 1:
                    self.state.sysbp_state = 2
        else:
            if self.state.sysbp_state == 1:
                if np.random.uniform(0, 1) < 0.9:
                    self.state.sysbp_state = 2
            elif self.state.sysbp_state == 0:
                up_prob = np.random.uniform(0, 1)
                if up_prob < 0.5:
                    self.state.sysbp_state = 1
                elif up_prob < 0.9:
                    self.state.sysbp_state = 2
            if np.random.uniform(0, 1) < 0.5:
                self.state.glucose_state = min(4, self.state.glucose_state + 1)

    def transition_vaso_off(self):
        '''
        vasopressor state off
        if vasopressor was on:
            for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1
            for diabetics, blood pressure falls by 1 w.p. .05 instead of .1
        '''
        if self.state.vaso_state == 1:
            if self.state.diabetic_idx == 0:
                if np.random.uniform(0, 1) < 0.1:
                    self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            else:
                if np.random.uniform(0, 1) < 0.05:
                    self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            self.state.vaso_state = 0

    def transition_fluctuate(self, hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate,
                             glucose_fluctuate):
        '''
        all (non-treatment) states fluctuate +/- 1 w.p. .1
        exception: glucose fluctuates +/- 1 w.p. .3 if diabetic
        '''
        if hr_fluctuate:
            hr_prob = np.random.uniform(0, 1)
            if hr_prob < 0.1:
                self.state.hr_state = max(0, self.state.hr_state - 1)
            elif hr_prob < 0.2:
                self.state.hr_state = min(2, self.state.hr_state + 1)
        if sysbp_fluctuate:
            sysbp_prob = np.random.uniform(0, 1)
            if sysbp_prob < 0.1:
                self.state.sysbp_state = max(0, self.state.sysbp_state - 1)
            elif sysbp_prob < 0.2:
                self.state.sysbp_state = min(2, self.state.sysbp_state + 1)
        if percoxyg_fluctuate:
            percoxyg_prob = np.random.uniform(0, 1)
            if percoxyg_prob < 0.1:
                self.state.percoxyg_state = max(0, self.state.percoxyg_state - 1)
            elif percoxyg_prob < 0.2:
                self.state.percoxyg_state = min(1, self.state.percoxyg_state + 1)
        if glucose_fluctuate:
            glucose_prob = np.random.uniform(0, 1)
            if self.state.diabetic_idx == 0:
                if glucose_prob < 0.1:
                    self.state.glucose_state = max(0, self.state.glucose_state - 1)
                elif glucose_prob < 0.2:
                    # The original capped this at 1, which is inconsistent with
                    # the five glucose levels (0-4) used everywhere else.
                    self.state.glucose_state = min(4, self.state.glucose_state + 1)
            else:
                if glucose_prob < 0.3:
                    self.state.glucose_state = max(0, self.state.glucose_state - 1)
                elif glucose_prob < 0.6:
                    self.state.glucose_state = min(4, self.state.glucose_state + 1)

    def calculateReward(self):
        num_abnormal = self.state.get_num_abnormal()
        if num_abnormal >= 3:
            return -1
        elif num_abnormal == 0 and not self.state.on_treatment():
            return 1
        return 0

    def transition(self, action):
        self.state = self.state.copy_state()

        if action.antibiotic == 1:
            self.transition_antibiotics_on()
            hr_fluctuate = False
            sysbp_fluctuate = False
        elif self.state.antibiotic_state == 1:
            self.transition_antibiotics_off()
            hr_fluctuate = False
            sysbp_fluctuate = False
        else:
            hr_fluctuate = True
            sysbp_fluctuate = True

        if action.ventilation == 1:
            self.transition_vent_on()
            percoxyg_fluctuate = False
        elif self.state.vent_state == 1:
            self.transition_vent_off()
            percoxyg_fluctuate = False
        else:
            percoxyg_fluctuate = True

        glucose_fluctuate = True

        if action.vasopressors == 1:
            self.transition_vaso_on()
            sysbp_fluctuate = False
            glucose_fluctuate = False
        elif self.state.vaso_state == 1:
            self.transition_vaso_off()
            sysbp_fluctuate = False

        self.transition_fluctuate(hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate,
                                  glucose_fluctuate)

        return self.calculateReward()

    def select_actions(self):
        assert self.policy_array is not None
        probs = self.policy_array[
            self.state.get_state_idx(self.policy_idx_type)
        ]
        aev_idx = np.random.choice(np.arange(Action.NUM_ACTIONS_TOTAL), p=probs)
        return Action(action_idx=aev_idx)
sepsisSimDiabetes/MDP.py
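A short rollout sketch for the simulator above. Action(action_idx=...) mirrors the construction used in select_actions, and check_absorbing_state mirrors the loop in get_new_state; the episode length, seed, and the assumption that action index 0 means "all treatments off" are illustrative, not taken from the source:

import numpy as np
from sepsisSimDiabetes.MDP import MDP
from sepsisSimDiabetes.Action import Action

np.random.seed(0)
mdp = MDP(p_diabetes=0.2)  # random initial state, 20% chance of diabetes
total_reward = 0
for _ in range(20):
    # assumed: action_idx 0 = no antibiotics / no ventilation / no vasopressors
    total_reward += mdp.transition(Action(action_idx=0))
    if mdp.state.check_absorbing_state():  # stop at death or discharge
        break
print(total_reward)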
import random
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error as mse
from sklearn import datasets
import unittest

import torch
from fastai.basic_train import Learner
from fastai.callbacks import OneCycleScheduler
from fastai.basic_data import DatasetType

from dies.data import (
    ds_from_df_from_dtypes,
    scale_datasets,
    create_databunch,
    ds_from_df,
)
from dies import data
from dies.mlp import MultiLayerPeceptron
from dies.embedding import Embedding
from dies.utils_pytorch import dev_to_np, xavier_init_uniform
from dies.autoencoder import Autoencoder

random_state = 0


def set_random_states():
    torch.manual_seed(random_state)
    np.random.seed(random_state)
    random.seed(random_state)


def get_df():
    X, y, _ = datasets.make_regression(
        n_samples=50,
        n_features=2,
        bias=1000,
        n_informative=2,
        noise=10,
        coef=True,
        random_state=42,
    )
    df1 = pd.DataFrame(
        data=np.concatenate([X, y.reshape(-1, 1)], axis=1),
        columns=["feat1", "feat2", "target"],
    )
    cats = np.random.randint(low=0, high=10, size=(df1.shape[0], 2))
    df1["cat_1"] = cats[:, 0]
    df1["cat_2"] = cats[:, 1]

    index1 = pd.date_range("2000-01-01", "2000-06-01", periods=df1.shape[0])
    index1 = pd.to_datetime(index1, utc=True)
    df1.index = index1

    return df1


class TestMLP(unittest.TestCase):
    def setUp(self):
        n_features = 3
        device = "cpu"

        df = get_df()
        ds = ds_from_df_from_dtypes(df, "target")
        self.ds_tr, self.ds_val, _ = data.train_test_split_dataset(ds)
        self.db = create_databunch(
            self.ds_tr, self.ds_val, None, batch_size=40, device="cpu"
        )
        set_random_states()

    def test_simple_mlp(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()

        ann_model = MultiLayerPeceptron(
            input_size, ann_structure=[2, 1], embedding_module=None, dropout=0.1
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_mlp_with_yrange(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()
        y_ranges = self.ds_tr.y_ranges

        ann_model = MultiLayerPeceptron(
            input_size,
            ann_structure=[2, 1],
            embedding_module=None,
            dropout=0.1,
            y_ranges=y_ranges,
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1, lr=0.1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_simple_mlp_with_embedding(self):
        input_size = self.ds_tr.x.shape[1]
        df_tr = self.ds_tr.to_df()

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)
        ann_model = MultiLayerPeceptron(
            input_size,
            ann_structure=[2, 1],
            embedding_module=embedding_module,
            dropout=0.1,
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_init = mse(df_tr.target, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr.target, y_hat)

        self.assertLess(e_end, e_init)

    def test_true(self):
        self.assertTrue(True)


class TestAE(unittest.TestCase):
    def setUp(self):
        n_features = 3
        device = "cpu"

        self.df = get_df()
        self.df.drop("target", axis=1, inplace=True)
        set_random_states()

    def test_simple_ae(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        ann_model = Autoencoder(input_size=input_size, ann_structure=ann_structure)
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_yranges(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]
        y_ranges = ds_tr.y_ranges

        ann_model = Autoencoder(
            input_size=input_size, ann_structure=ann_structure, y_ranges=y_ranges
        )
        ann_model.apply(xavier_init_uniform)

        set_random_states()
        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_embedding_and_yrange(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()
        y_ranges = ds_tr.y_ranges

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)
        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="start",
            y_ranges=y_ranges,
        )
        set_random_states()
        ann_model.apply(xavier_init_uniform)

        set_random_states()
        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        # adds some small tolerance
        self.assertLess(
            e_end,
            e_init + 0.05,
        )

    def test_ae_with_embedding_at_start(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)
        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="start",
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_ae_with_embedding_at_bottleneck(self):
        cols = ["feat1", "feat2"]
        ds = ds_from_df(
            self.df, y_columns=cols, x_columns=cols, cat_columns=["cat_1", "cat_2"]
        )
        ds_tr, ds_val, _ = data.train_test_split_dataset(ds)
        db = create_databunch(ds_tr, ds_val, None, batch_size=40, device="cpu")
        df_tr = ds_tr.to_df()

        input_size = ds_tr.x.shape[1]
        print(input_size, ds_tr.y.shape[1])
        ann_structure = [10, 4, 1]

        embedding_module = Embedding([11, 11], embedding_dropout=0.1)
        ann_model = Autoencoder(
            input_size=input_size,
            ann_structure=ann_structure,
            embedding_module=embedding_module,
            embeding_position="bottleneck",
        )
        ann_model.apply(xavier_init_uniform)

        learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        target_cols = ["feat1_target", "feat2_target"]
        e_init = mse(df_tr[target_cols].values, y_hat)

        learn.fit(1, lr=0.1)

        y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))
        e_end = mse(df_tr[target_cols].values, y_hat)

        self.assertLess(e_end, e_init)

    def test_true(self):
        self.assertTrue(True)
dies/dies/tests/test_regression.py
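To run just this test module, standard unittest discovery is enough (a minimal sketch; the path is taken from the repository layout above and assumes execution from the repository root):

import unittest

suite = unittest.defaultTestLoader.discover(
    "dies/dies/tests", pattern="test_regression.py")
unittest.TextTestRunner(verbosity=2).run(suite)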
import argparse

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from net.tf_net import \
    calculate_accuracy, calculate_loss, \
    create_simple_cnn_model, optimize_weights
from net.keras_net import simple_cnn

# MNIST images are 28x28 grayscale. These two values were used below but never
# defined in the original file.
image_height = 28
image_width = 28


def train_keras(batch_size, epochs, n_classes):
    # x_train returns data with shape (60000, 28, 28)
    # y_train returns data with shape (60000,)
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # add one dimension for the color channel (only gray values)
    x_train = x_train.reshape(x_train.shape[0], image_height, image_width, 1)
    x_test = x_test.reshape(x_test.shape[0], image_height, image_width, 1)

    # define the input shape of an image
    input_shape = (image_height, image_width, 1)

    # convert tensors to float
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    # normalize data: divide by 255 (max color value) to get values between 0 and 1
    x_train /= 255
    x_test /= 255

    # one-hot encoding: converts into an array of length 'n_classes' and sets one
    # where true, e.g. label = 5 -> y_train[5] = 1, rest is 0
    y_train = tf.keras.utils.to_categorical(y_train, n_classes)
    y_test = tf.keras.utils.to_categorical(y_test, n_classes)

    simple_cnn_model = simple_cnn(input_shape)
    # The fifth positional parameter of Model.fit() is `verbose`, so the test set
    # must be passed as validation_data (the original passed it positionally).
    simple_cnn_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                         validation_data=(x_test, y_test))

    train_loss, train_accuracy = simple_cnn_model.evaluate(
        x_train, y_train, verbose=0)
    print('Train data loss:', train_loss)
    print('Train data accuracy:', train_accuracy)

    test_loss, test_accuracy = simple_cnn_model.evaluate(
        x_test, y_test, verbose=0)
    print('Test data loss:', test_loss)
    print('Test data accuracy:', test_accuracy)


def train_tensorflow(batch_size, epochs, n_classes):
    mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
    test_images, test_labels = mnist_data.test.images, mnist_data.test.labels
    input_size = 784

    # declare placeholders
    x_input = tf.placeholder(tf.float32, shape=[None, input_size])
    y_input = tf.placeholder(tf.float32, shape=[None, n_classes])
    # when testing, dropout is disabled via this flag
    bool_dropout = tf.placeholder(tf.bool)

    # create the neural net and receive logits
    logits = create_simple_cnn_model(x_input, y_input, bool_dropout)

    # calculate loss, optimize weights and calculate accuracy
    loss_operation = calculate_loss(logits, y_input)
    optimizer = optimize_weights(loss_operation)
    accuracy_operation = calculate_accuracy(logits, y_input)

    # start training
    session = tf.Session()
    session.run(tf.global_variables_initializer())

    # merge all summaries for tensorboard
    merged_summary_operation = tf.summary.merge_all()
    train_summary_writer = tf.summary.FileWriter('/tmp/train', session.graph)
    test_summary_writer = tf.summary.FileWriter('/tmp/test')

    for batch_n in range(epochs):
        mnist_batch = mnist_data.train.next_batch(batch_size)
        train_images, train_labels = mnist_batch[0], mnist_batch[1]
        _, merged_summary = session.run([optimizer, merged_summary_operation],
                                        feed_dict={
                                            x_input: train_images,
                                            y_input: train_labels,
                                            bool_dropout: True
                                        })
        train_summary_writer.add_summary(merged_summary, batch_n)

        if batch_n % 10 == 0:
            merged_summary, _ = session.run([merged_summary_operation,
                                             accuracy_operation],
                                            feed_dict={
                                                x_input: test_images,
                                                y_input: test_labels,
                                                bool_dropout: False
                                            })
            test_summary_writer.add_summary(merged_summary, batch_n)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Train a simple neural net to recognize number images from '
                    'the MNIST dataset and apply the correct label')
    parser.add_argument('--epochs', default=200, type=int,
                        help='Amount of batches the net trains on')
    parser.add_argument('--batch_size', default=100, type=int,
                        help='Number of training samples inside one batch')
    parser.add_argument('--tf', default=True,
                        help='Tensorflow or Keras implementation')
    args = parser.parse_args()

    if args.tf:
        train_tensorflow(args.batch_size, args.epochs, 10)
    else:
        # MNIST has 10 classes; the original referenced args.n_classes, which
        # was never added as an argument.
        train_keras(args.batch_size, args.epochs, 10)
00_MNIST-label/train.py
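The one-hot encoding comment in train_keras is easy to sanity-check in isolation:

import tensorflow as tf

# label 5 with 10 classes -> index 5 is set, everything else is 0
print(tf.keras.utils.to_categorical(5, 10))
# [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]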
#!/usr/bin/env python3
from math import floor

from matplotlib.patches import Circle, Rectangle, ConnectionPatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation

Colors = ['green', 'purple', 'orange', 'red', 'blue', 'yellow']


class Animation:
    def __init__(self, my_map, starts, goals, paths, predictions):
        self.my_map = np.flip(np.transpose(my_map), 1)
        self.predictions = predictions

        # Transform (row, col) locations into plot coordinates.
        self.starts = []
        for start in starts:
            self.starts.append((start[1], len(self.my_map[0]) - 1 - start[0]))
        self.goals = []
        for goal in goals:
            self.goals.append((goal[1], len(self.my_map[0]) - 1 - goal[0]))
        self.paths = []
        if paths:
            for path in paths:
                self.paths.append([])
                for loc in path:
                    self.paths[-1].append((loc[1], len(self.my_map[0]) - 1 - loc[0]))

        aspect = len(self.my_map) / len(self.my_map[0])
        self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
        self.ax = self.fig.add_subplot(111, aspect='equal')
        self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1,
                                 wspace=None, hspace=None)
        # self.ax.set_frame_on(False)

        self.patches = []
        self.artists = []
        self.agents = dict()
        self.agent_names = dict()
        self.goal_predictions = dict()
        self.agent_goal_connections = dict()

        # create boundary patch
        x_min = -0.5
        y_min = -0.5
        x_max = len(self.my_map) - 0.5
        y_max = len(self.my_map[0]) - 0.5
        plt.xlim(x_min, x_max)
        plt.ylim(y_min, y_max)
        plt.xticks(np.arange(x_min, x_max, 1))
        plt.yticks(np.arange(y_min, y_max, 1))
        plt.grid(color='0.85')

        self.patches.append(Rectangle((x_min, y_min), x_max - x_min, y_max - y_min,
                                      facecolor='none', edgecolor='gray'))
        for i in range(len(self.my_map)):
            for j in range(len(self.my_map[0])):
                if self.my_map[i][j]:
                    self.patches.append(Rectangle((i - 0.5, j - 0.5), 1, 1,
                                                  facecolor='gray', edgecolor='gray'))

        self.T = 0
        # draw goals
        for i, goal in enumerate(self.goals):
            goal_color = Colors[i % len(Colors)]
            self.patches.append(Rectangle((goal[0] - 0.25, goal[1] - 0.25), 0.5, 0.5,
                                          facecolor=goal_color, edgecolor='black',
                                          alpha=0.5))

        # create agents
        for a in range(len(self.paths)):
            name = str(a)
            # Use the transformed start coordinates, consistent with the patches
            # above (the original used the raw (row, col) input here).
            self.agents[a] = Circle((self.starts[a][0], self.starts[a][1]), 0.3,
                                    facecolor=Colors[a % len(Colors)],
                                    edgecolor='black')
            self.agents[a].original_face_color = Colors[a % len(Colors)]
            self.patches.append(self.agents[a])
            self.T = max(self.T, len(paths[a]) - 1)

            self.agent_names[a] = self.ax.text(self.starts[a][0],
                                               self.starts[a][1] + 0.25, name)
            self.agent_names[a].set_horizontalalignment('center')
            self.agent_names[a].set_verticalalignment('center')
            self.artists.append(self.agent_names[a])

            # connections & predictions
            self.goal_predictions[a] = dict()
            self.agent_goal_connections[a] = dict()
            for i, goal in enumerate(self.goals):
                goal_color = Colors[i % len(Colors)]

                self.goal_predictions[a][i] = self.ax.text(goal[0], goal[1], str(i))
                self.goal_predictions[a][i].set_horizontalalignment('center')
                self.goal_predictions[a][i].set_verticalalignment('center')
                self.artists.append(self.goal_predictions[a][i])

                # The original built this line from the loop variable `start`
                # left over from the starts loop above; each agent's own
                # (already transformed) start is what is meant here.
                self.agent_goal_connections[a][i] = plt.Line2D(
                    (self.starts[a][0], goal[0]), (self.starts[a][1], goal[1]),
                    lw=2.5, color=goal_color)
                self.artists.append(self.agent_goal_connections[a][i])

        self.animation = animation.FuncAnimation(self.fig, self.animate_func,
                                                 init_func=self.init_func,
                                                 frames=int(self.T + 1) * 10,
                                                 interval=100, blit=True)

    def save(self, file_name, speed):
        self.animation.save(
            file_name, fps=10 * speed, dpi=200,
            savefig_kwargs={"pad_inches": 0})

    @staticmethod
    def show():
        plt.show()

    def init_func(self):
        for p in self.patches:
            self.ax.add_patch(p)
        for a in self.artists:
            self.ax.add_artist(a)
        return self.patches + self.artists

    def animate_func(self, t):
        # for each agent
        for a in range(len(self.paths)):
            pos = self.get_state(t / 10, self.paths[a])
            self.agents[a].center = (pos[0], pos[1])
            self.agent_names[a].set_position((pos[0], pos[1] + 0.5))

            # for each goal
            for i in self.agent_goal_connections[a]:
                timestep = floor(t / 10)
                if timestep not in self.predictions[a]:
                    continue
                prediction = self.predictions[a][timestep][i]

                # connection lines
                self.agent_goal_connections[a][i].set_data(
                    [pos[0], self.goals[i][0]], [pos[1], self.goals[i][1]])
                self.agent_goal_connections[a][i].set_alpha(prediction)

                # prediction percentages
                self.goal_predictions[a][i].set_text("{:.2f}".format(prediction * 100))
                self.goal_predictions[a][i].set_position(
                    [(pos[0] + self.goals[i][0]) / 2, (pos[1] + self.goals[i][1]) / 2])
                self.goal_predictions[a][i].set_alpha(prediction)

        # reset all colors
        for _, agent in self.agents.items():
            agent.set_facecolor(agent.original_face_color)

        # check agent-agent collisions
        agents_array = [agent for _, agent in self.agents.items()]
        for i in range(0, len(agents_array)):
            for j in range(i + 1, len(agents_array)):
                d1 = agents_array[i]
                d2 = agents_array[j]
                pos1 = np.array(d1.center)
                pos2 = np.array(d2.center)
                if np.linalg.norm(pos1 - pos2) < 0.7:
                    d1.set_facecolor('red')
                    d2.set_facecolor('red')
                    print("COLLISION! (agent-agent) ({}, {}) at time {}".format(
                        i, j, t / 10))

        return self.patches + self.artists

    @staticmethod
    def get_state(t, path):
        if int(t) <= 0:
            return np.array(path[0])
        elif int(t) >= len(path):
            return np.array(path[-1])
        else:
            pos_last = np.array(path[int(t) - 1])
            pos_next = np.array(path[int(t)])
            pos = (pos_next - pos_last) * (t - int(t)) + pos_last
            return pos
visualize.py
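A minimal driver for the Animation class above. The map, path, and prediction values are toy data made up for illustration; predictions[agent][timestep][goal_idx] follows the indexing used in animate_func:

# toy 3x3 open map: my_map[row][col] == True would mean a blocked cell
my_map = [[False] * 3 for _ in range(3)]
starts = [(0, 0)]
goals = [(2, 2)]
paths = [[(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]]
# full confidence that agent 0 pursues goal 0 at every timestep
predictions = {0: {t: {0: 1.0} for t in range(len(paths[0]))}}

anim = Animation(my_map, starts, goals, paths, predictions)
Animation.show()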
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn

from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_nms
from fcos_core.structures.boxlist_ops import cat_boxlist
from fcos_core.modeling.box_coder import BoxCoder


class PostProcessor(nn.Module):
    """
    From a set of classification scores, box regression and proposals,
    computes the post-processed boxes, and applies NMS to obtain the
    final results
    """

    def __init__(
        self,
        score_thresh=0.05,
        nms=0.5,
        detections_per_img=100,
        box_coder=None,
        cls_agnostic_bbox_reg=False,
        bbox_aug_enabled=False
    ):
        """
        Arguments:
            score_thresh (float)
            nms (float)
            detections_per_img (int)
            box_coder (BoxCoder)
        """
        super(PostProcessor, self).__init__()
        self.score_thresh = score_thresh
        self.nms = nms
        self.detections_per_img = detections_per_img
        if box_coder is None:
            box_coder = BoxCoder(weights=(10., 10., 5., 5.))
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        self.bbox_aug_enabled = bbox_aug_enabled

    def forward(self, x, boxes):
        """
        Arguments:
            x (tuple[tensor, tensor]): x contains the class logits
                and the box_regression from the model.
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra fields labels and scores
        """
        class_logits, box_regression = x
        class_prob = F.softmax(class_logits, -1)

        # TODO think about a representation of batch of boxes
        image_shapes = [box.size for box in boxes]
        boxes_per_image = [len(box) for box in boxes]
        concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)

        if self.cls_agnostic_bbox_reg:
            box_regression = box_regression[:, -4:]
        proposals = self.box_coder.decode(
            box_regression.view(sum(boxes_per_image), -1), concat_boxes
        )
        if self.cls_agnostic_bbox_reg:
            proposals = proposals.repeat(1, class_prob.shape[1])

        num_classes = class_prob.shape[1]

        proposals = proposals.split(boxes_per_image, dim=0)
        class_prob = class_prob.split(boxes_per_image, dim=0)

        results = []
        for prob, boxes_per_img, image_shape in zip(
            class_prob, proposals, image_shapes
        ):
            boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            if not self.bbox_aug_enabled:  # If bbox aug is enabled, we will do it later
                boxlist = self.filter_results(boxlist, num_classes)
            results.append(boxlist)
        return results

    def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field.

        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.

        `scores` has shape (#detections, #classes), where each row represents a
        list of object detection confidence scores for each of the object classes
        in the dataset (including the background class). `scores[i, j]` corresponds
        to the box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist

    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result


def make_roi_box_post_processor(cfg):
    use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN

    bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
    box_coder = BoxCoder(weights=bbox_reg_weights)

    score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH
    nms_thresh = cfg.MODEL.ROI_HEADS.NMS
    detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG
    cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG
    bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED

    postprocessor = PostProcessor(
        score_thresh,
        nms_thresh,
        detections_per_img,
        box_coder,
        cls_agnostic_bbox_reg,
        bbox_aug_enabled
    )
    return postprocessor
fcos_core/modeling/roi_heads/box_head/inference.py
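The cap at the end of filter_results relies on torch.kthvalue: taking the (n - K + 1)-th smallest score yields a threshold at which the top K detections survive. A standalone sketch of that trick:

import torch

scores = torch.tensor([0.9, 0.1, 0.8, 0.3, 0.7])
max_detections = 3
# (n - K + 1)-th smallest value; exactly K scores are >= this threshold
# (ties at the threshold would keep more, as in filter_results itself)
image_thresh, _ = torch.kthvalue(scores, scores.numel() - max_detections + 1)
keep = torch.nonzero(scores >= image_thresh.item()).squeeze(1)
print(keep)  # tensor([0, 2, 4])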
from __future__ import annotations

import datetime
from functools import partial
from textwrap import dedent
from typing import TYPE_CHECKING
import warnings

import numpy as np

from pandas._libs.tslibs import Timedelta
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
    Axis,
    TimedeltaConvertibleTypes,
)

if TYPE_CHECKING:
    from pandas import DataFrame, Series
    from pandas.core.generic import NDFrame

from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna

import pandas.core.common as common  # noqa: PDF018
from pandas.core.indexers.objects import (
    BaseIndexer,
    ExponentialMovingWindowIndexer,
    GroupbyIndexer,
)
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
    _shared_docs,
    args_compat,
    create_section_header,
    kwargs_compat,
    numba_notes,
    template_header,
    template_returns,
    template_see_also,
    window_agg_numba_parameters,
)
from pandas.core.window.numba_ import (
    generate_ewma_numba_table_func,
    generate_numba_ewma_func,
)
from pandas.core.window.online import (
    EWMMeanState,
    generate_online_numba_ewma_func,
)
from pandas.core.window.rolling import (
    BaseWindow,
    BaseWindowGroupby,
)


def get_center_of_mass(
    comass: float | None,
    span: float | None,
    halflife: float | None,
    alpha: float | None,
) -> float:
    valid_count = common.count_not_none(comass, span, halflife, alpha)
    if valid_count > 1:
        raise ValueError("comass, span, halflife, and alpha are mutually exclusive")

    # Convert to center of mass; domain checks ensure 0 < alpha <= 1
    if comass is not None:
        if comass < 0:
            raise ValueError("comass must satisfy: comass >= 0")
    elif span is not None:
        if span < 1:
            raise ValueError("span must satisfy: span >= 1")
        comass = (span - 1) / 2
    elif halflife is not None:
        if halflife <= 0:
            raise ValueError("halflife must satisfy: halflife > 0")
        decay = 1 - np.exp(np.log(0.5) / halflife)
        comass = 1 / decay - 1
    elif alpha is not None:
        if alpha <= 0 or alpha > 1:
            raise ValueError("alpha must satisfy: 0 < alpha <= 1")
        comass = (1 - alpha) / alpha
    else:
        raise ValueError("Must pass one of comass, span, halflife, or alpha")

    return float(comass)


def _calculate_deltas(
    times: str | np.ndarray | NDFrame | None,
    halflife: float | TimedeltaConvertibleTypes | None,
) -> np.ndarray:
    """
    Return the diff of the times divided by the half-life. These values are used in
    the calculation of the ewm mean.

    Parameters
    ----------
    times : str, np.ndarray, Series, default None
        Times corresponding to the observations. Must be monotonically increasing
        and ``datetime64[ns]`` dtype.
    halflife : float, str, timedelta, optional
        Half-life specifying the decay

    Returns
    -------
    np.ndarray
        Diff of the times divided by the half-life
    """
    # error: Item "str" of "Union[str, ndarray, NDFrameT, None]" has no
    # attribute "view"
    # error: Item "None" of "Union[str, ndarray, NDFrameT, None]" has no
    # attribute "view"
    _times = np.asarray(
        times.view(np.int64), dtype=np.float64  # type: ignore[union-attr]
    )
    _halflife = float(Timedelta(halflife).value)
    return np.diff(_times) / _halflife


class ExponentialMovingWindow(BaseWindow):
    r"""
    Provide exponential weighted (EW) functions.

    Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``.

    Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
    provided.

    Parameters
    ----------
    com : float, optional
        Specify decay in terms of center of mass,
        :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.
    span : float, optional
        Specify decay in terms of span,
        :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.
    halflife : float, str, timedelta, optional
        Specify decay in terms of half-life,
        :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
        :math:`halflife > 0`.

        If ``times`` is specified, the time unit (str or timedelta) over which an
        observation decays to half its value. Only applicable to ``mean()``,
        and the halflife value will not apply to the other functions.

        .. versionadded:: 1.1.0

    alpha : float, optional
        Specify smoothing factor :math:`\alpha` directly,
        :math:`0 < \alpha \leq 1`.
    min_periods : int, default 0
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    adjust : bool, default True
        Divide by decaying adjustment factor in beginning periods to account
        for imbalance in relative weightings (viewing EWMA as a moving average).

        - When ``adjust=True`` (default), the EW function is calculated using weights
          :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series
          [:math:`x_0, x_1, ..., x_t`] would be:

        .. math::
            y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -
            \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}

        - When ``adjust=False``, the exponentially weighted function is calculated
          recursively:

        .. math::
            \begin{split}
                y_0 &= x_0\\
                y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
            \end{split}
    ignore_na : bool, default False
        Ignore missing values when calculating weights; specify ``True`` to reproduce
        pre-0.15.0 behavior.

        - When ``ignore_na=False`` (default), weights are based on absolute positions.
          For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
          the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
          :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
          :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.

        - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based
          on relative positions. For example, the weights of :math:`x_0` and
          :math:`x_2` used in calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
          ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if
          ``adjust=False``.
    axis : {0, 1}, default 0
        The axis to use. The value 0 identifies the rows, and 1
        identifies the columns.
    times : str, np.ndarray, Series, default None

        .. versionadded:: 1.1.0

        Times corresponding to the observations. Must be monotonically increasing and
        ``datetime64[ns]`` dtype.

        If str, the name of the column in the DataFrame representing the times.

        If 1-D array like, a sequence with the same shape as the observations.

        Only applicable to ``mean()``.
    method : str {'single', 'table'}, default 'single'
        Execute the rolling operation per single column or row (``'single'``)
        or over the entire object (``'table'``).

        This argument is only implemented when specifying ``engine='numba'``
        in the method call.

        Only applicable to ``mean()``

        .. versionadded:: 1.4.0

    Returns
    -------
    DataFrame
        A Window sub-classed for the particular operation.

    See Also
    --------
    rolling : Provides rolling window calculations.
    expanding : Provides expanding transformations.

    Notes
    -----
    More details can be found at:
    :ref:`Exponentially weighted windows <window.exponentially_weighted>`.

    Examples
    --------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    >>> df.ewm(com=0.5).mean()
              B
    0  0.000000
    1  0.750000
    2  1.615385
    3  1.615385
    4  3.670213

    Specifying ``times`` with a timedelta ``halflife`` when computing mean.

    >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
    >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
              B
    0  0.000000
    1  0.585786
    2  1.523889
    3  1.523889
    4  3.233686
    """

    _attributes = [
        "com",
        "span",
        "halflife",
        "alpha",
        "min_periods",
        "adjust",
        "ignore_na",
        "axis",
        "times",
        "method",
    ]

    def __init__(
        self,
        obj: NDFrame,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool = True,
        ignore_na: bool = False,
        axis: Axis = 0,
        times: str | np.ndarray | NDFrame | None = None,
        method: str = "single",
        *,
        selection=None,
    ):
        super().__init__(
            obj=obj,
            min_periods=1 if min_periods is None else max(int(min_periods), 1),
            on=None,
            center=False,
            closed=None,
            method=method,
            axis=axis,
            selection=selection,
        )
        self.com = com
        self.span = span
        self.halflife = halflife
        self.alpha = alpha
        self.adjust = adjust
        self.ignore_na = ignore_na
        self.times = times
        if self.times is not None:
            if not self.adjust:
                raise NotImplementedError("times is not supported with adjust=False.")
            if isinstance(self.times, str):
                warnings.warn(
                    (
                        "Specifying times as a string column label is deprecated "
                        "and will be removed in a future version. Pass the column "
                        "into times instead."
                    ),
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                self.times = self._selected_obj[self.times]
            if not is_datetime64_ns_dtype(self.times):
                raise ValueError("times must be datetime64[ns] dtype.")
            # error: Argument 1 to "len" has incompatible type "Union[str, ndarray,
            # NDFrameT, None]"; expected "Sized"
            if len(self.times) != len(obj):  # type: ignore[arg-type]
                raise ValueError("times must be the same length as the object.")
            if not isinstance(self.halflife, (str, datetime.timedelta)):
                raise ValueError(
                    "halflife must be a string or datetime.timedelta object"
                )
            if isna(self.times).any():
                raise ValueError("Cannot convert NaT values to integer")
            self._deltas = _calculate_deltas(self.times, self.halflife)
            # Halflife is no longer applicable when calculating COM
            # But allow COM to still be calculated if the user passes other decay args
            if common.count_not_none(self.com, self.span, self.alpha) > 0:
                self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
            else:
                self._com = 1.0
        else:
            if self.halflife is not None and isinstance(
                self.halflife, (str, datetime.timedelta)
            ):
                raise ValueError(
                    "halflife can only be a timedelta convertible argument if "
                    "times is not None."
                )
            # Without times, points are equally spaced
            self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)
            self._com = get_center_of_mass(
                # error: Argument 3 to "get_center_of_mass" has incompatible type
                # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";
                # expected "Optional[float]"
                self.com,
                self.span,
                self.halflife,  # type: ignore[arg-type]
                self.alpha,
            )

    def _get_window_indexer(self) -> BaseIndexer:
        """
        Return an indexer class that will compute the window start and end bounds
        """
        return ExponentialMovingWindowIndexer()

    def online(self, engine="numba", engine_kwargs=None):
        """
        Return an ``OnlineExponentialMovingWindow`` object to calculate
        exponentially moving window aggregations in an online method.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        engine: str, default ``'numba'``
            Execution engine to calculate online aggregations.
            Applies to all supported aggregation methods.

        engine_kwargs : dict, default None
            Applies to all supported aggregation methods.

            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
              applied to the function

        Returns
        -------
        OnlineExponentialMovingWindow
        """
        return OnlineExponentialMovingWindow(
            obj=self.obj,
            com=self.com,
            span=self.span,
            halflife=self.halflife,
            alpha=self.alpha,
            min_periods=self.min_periods,
            adjust=self.adjust,
            ignore_na=self.ignore_na,
            axis=self.axis,
            times=self.times,
            engine=engine,
            engine_kwargs=engine_kwargs,
            selection=self._selection,
        )

    @doc(
        _shared_docs["aggregate"],
        see_also=dedent(
            """
        See Also
        --------
        pandas.DataFrame.rolling.aggregate
        """
        ),
        examples=dedent(
            """
        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        >>> df
           A  B  C
        0  1  4  7
        1  2  5  8
        2  3  6  9

        >>> df.ewm(alpha=0.5).mean()
                  A         B         C
        0  1.000000  4.000000  7.000000
        1  1.666667  4.666667  7.666667
        2  2.428571  5.428571  8.428571
        """
        ),
        klass="Series/Dataframe",
        axis="",
    )
    def aggregate(self, func, *args, **kwargs):
        return super().aggregate(func, *args, **kwargs)

    agg = aggregate

    @doc(
        template_header,
        create_section_header("Parameters"),
        args_compat,
        window_agg_numba_parameters,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes.replace("\n", "", 1),
        window_method="ewm",
        aggregation_description="(exponential weighted moment) mean",
        agg_method="mean",
    )
    def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
        if maybe_use_numba(engine):
            if self.method == "single":
                ewma_func = generate_numba_ewma_func(
                    engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
                )
                numba_cache_key = (lambda x: x, "ewma")
            else:
                ewma_func = generate_ewma_numba_table_func(
                    engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
                )
                numba_cache_key = (lambda x: x, "ewma_table")
            return self._apply(
                ewma_func,
                numba_cache_key=numba_cache_key,
            )
        elif engine in ("cython", None):
            if engine_kwargs is not None:
                raise ValueError("cython engine does not accept engine_kwargs")
            nv.validate_window_func("mean", args, kwargs)
            deltas = None if self.times is None else self._deltas
            window_func = partial(
                window_aggregations.ewma,
                com=self._com,
                adjust=self.adjust,
                ignore_na=self.ignore_na,
                deltas=deltas,
            )
            return self._apply(window_func)
        else:
            raise ValueError("engine must be either 'numba' or 'cython'")

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) standard deviation",
        agg_method="std",
    )
    def std(self, bias: bool = False, *args, **kwargs):
        nv.validate_window_func("std", args, kwargs)
        return zsqrt(self.var(bias=bias, **kwargs))

    def vol(self, bias: bool = False, *args, **kwargs):
        warnings.warn(
            (
                "vol is deprecated will be removed in a future version. "
                "Use std instead."
            ),
            FutureWarning,
            stacklevel=2,
        )
        return self.std(bias, *args, **kwargs)

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        args_compat,
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) variance",
        agg_method="var",
    )
    def var(self, bias: bool = False, *args, **kwargs):
        nv.validate_window_func("var", args, kwargs)
        window_func = window_aggregations.ewmcov
        wfunc = partial(
            window_func,
            com=self._com,
            adjust=self.adjust,
            ignore_na=self.ignore_na,
            bias=bias,
        )

        def var_func(values, begin, end, min_periods):
            return wfunc(values, begin, end, min_periods, values)

        return self._apply(var_func)

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame , optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndex DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        bias : bool, default False
            Use a standard estimation bias correction.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) sample covariance",
        agg_method="cov",
    )
    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        bias: bool = False,
        **kwargs,
    ):
        from pandas import Series

        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )
            result = window_aggregations.ewmcov(
                x_array,
                start,
                end,
                # error: Argument 4 to "ewmcov" has incompatible type
                # "Optional[int]"; expected "int"
                self.min_periods,  # type: ignore[arg-type]
                y_array,
                self._com,
                self.adjust,
                self.ignore_na,
                bias,
            )
            return Series(result, index=x.index, name=x.name)

        return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndex DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        """
        ).replace("\n", "", 1),
        kwargs_compat,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also[:-1],
        window_method="ewm",
        aggregation_description="(exponential weighted moment) sample correlation",
        agg_method="corr",
    )
    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        **kwargs,
    ):
        from pandas import Series

        def cov_func(x, y):
            x_array = self._prep_values(x)
            y_array = self._prep_values(y)
            window_indexer = self._get_window_indexer()
            min_periods = (
                self.min_periods
                if self.min_periods is not None
                else window_indexer.window_size
            )
            start, end = window_indexer.get_window_bounds(
                num_values=len(x_array),
                min_periods=min_periods,
                center=self.center,
                closed=self.closed,
            )

            def _cov(X, Y):
                return window_aggregations.ewmcov(
                    X,
                    start,
                    end,
                    min_periods,
                    Y,
                    self._com,
                    self.adjust,
                    self.ignore_na,
                    True,
                )

            with np.errstate(all="ignore"):
                cov = _cov(x_array, y_array)
                x_var = _cov(x_array, x_array)
                y_var = _cov(y_array, y_array)
                result = cov / zsqrt(x_var * y_var)
            return Series(result, index=x.index, name=x.name)

        return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)


class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):
    """
    Provide an exponential moving window groupby implementation.
    """

    _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes

    def __init__(self, obj, *args, _grouper=None, **kwargs):
        super().__init__(obj, *args, _grouper=_grouper, **kwargs)

        if not obj.empty and self.times is not None:
            # sort the times and recalculate the deltas according to the groups
            groupby_order = np.concatenate(list(self._grouper.indices.values()))
            self._deltas = _calculate_deltas(
                self.times.take(groupby_order),  # type: ignore[union-attr]
                self.halflife,
            )

    def _get_window_indexer(self) -> GroupbyIndexer:
        """
        Return an indexer class that will compute the window start and end bounds

        Returns
        -------
        GroupbyIndexer
        """
        window_indexer = GroupbyIndexer(
            groupby_indicies=self._grouper.indices,
            window_indexer=ExponentialMovingWindowIndexer,
        )
        return window_indexer


class OnlineExponentialMovingWindow(ExponentialMovingWindow):
    def __init__(
        self,
        obj: NDFrame,
        com: float | None = None,
        span: float | None = None,
        halflife: float | TimedeltaConvertibleTypes | None = None,
        alpha: float | None = None,
        min_periods: int | None = 0,
        adjust: bool = True,
        ignore_na: bool = False,
        axis: Axis = 0,
        times: str | np.ndarray | NDFrame | None = None,
        engine: str = "numba",
        engine_kwargs: dict[str, bool] | None = None,
        *,
        selection=None,
    ):
        if times is not None:
            raise NotImplementedError(
                "times is not implemented with online operations."
            )
        super().__init__(
            obj=obj,
            com=com,
            span=span,
            halflife=halflife,
            alpha=alpha,
            min_periods=min_periods,
            adjust=adjust,
            ignore_na=ignore_na,
            axis=axis,
            times=times,
            selection=selection,
        )
        self._mean = EWMMeanState(
            self._com, self.adjust, self.ignore_na, self.axis, obj.shape
        )
        if maybe_use_numba(engine):
            self.engine = engine
            self.engine_kwargs = engine_kwargs
        else:
            raise ValueError("'numba' is the only supported engine")

    def reset(self):
        """
        Reset the state captured by `update` calls.
        """
        self._mean.reset()

    def aggregate(self, func, *args, **kwargs):
        return NotImplementedError

    def std(self, bias: bool = False, *args, **kwargs):
        return NotImplementedError

    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        **kwargs,
    ):
        return NotImplementedError

    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        bias: bool = False,
        **kwargs,
    ):
        return NotImplementedError

    def var(self, bias: bool = False, *args, **kwargs):
        return NotImplementedError

    def mean(self, *args, update=None, update_times=None, **kwargs):
        """
        Calculate an online exponentially weighted mean.

        Parameters
        ----------
        update: DataFrame or Series, default None
            New values to continue calculating the
            exponentially weighted mean from the last values and weights.
            Values should be float64 dtype.

            ``update`` needs to be ``None`` the first time the
            exponentially weighted mean is calculated.

        update_times: Series or 1-D np.ndarray, default None
            New times to continue calculating the
            exponentially weighted mean from the last values and weights.
            If ``None``, values are assumed to be evenly spaced
            in time.
            This feature is currently unsupported.

        Returns
        -------
        DataFrame or Series

        Examples
        --------
        >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
        >>> online_ewm = df.head(2).ewm(0.5).online()
        >>> online_ewm.mean()
              a     b
        0  0.00  5.00
        1  0.75  5.75
        >>> online_ewm.mean(update=df.tail(3))
                  a         b
        2  1.615385  6.615385
        3  2.550000  7.550000
        4  3.520661  8.520661
        >>> online_ewm.reset()
        >>> online_ewm.mean()
              a     b
        0  0.00  5.00
        1  0.75  5.75
        """
        result_kwargs = {}
        is_frame = True if self._selected_obj.ndim == 2 else False
        if update_times is not None:
            raise NotImplementedError("update_times is not implemented.")
        else:
            update_deltas = np.ones(
                max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64
            )
            if update is not None:
                if self._mean.last_ewm is None:
                    raise ValueError(
                        "Must call mean with update=None first before passing update"
                    )
                result_from = 1
                result_kwargs["index"] = update.index
                if is_frame:
                    last_value = self._mean.last_ewm[np.newaxis, :]
                    result_kwargs["columns"] = update.columns
                else:
                    last_value = self._mean.last_ewm
                    result_kwargs["name"] = update.name
                np_array = np.concatenate((last_value, update.to_numpy()))
            else:
                result_from = 0
                result_kwargs["index"] = self._selected_obj.index
                if is_frame:
                    result_kwargs["columns"] = self._selected_obj.columns
                else:
                    result_kwargs["name"] = self._selected_obj.name
                np_array = self._selected_obj.astype(np.float64).to_numpy()
        ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
        result = self._mean.run_ewm(
            np_array if is_frame else np_array[:, np.newaxis],
            update_deltas,
            self.min_periods,
            ewma_func,
        )
        if not is_frame:
            result = result.squeeze()
        result = result[result_from:]
        result = self._selected_obj._constructor(result, **result_kwargs)
        return result
pandas/core/window/ewm.py
30,704
Provide exponential weighted (EW) functions. Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``. Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be provided. Parameters ---------- com : float, optional Specify decay in terms of center of mass, :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. span : float, optional Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. halflife : float, str, timedelta, optional Specify decay in terms of half-life, :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. If ``times`` is specified, the time unit (str or timedelta) over which an observation decays to half its value. Only applicable to ``mean()`` and halflife value will not apply to the other functions. .. versionadded:: 1.1.0 alpha : float, optional Specify smoothing factor :math:`\alpha` directly, :math:`0 < \alpha \leq 1`. min_periods : int, default 0 Minimum number of observations in window required to have a value (otherwise result is NA). adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average). - When ``adjust=True`` (default), the EW function is calculated using weights :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series [:math:`x_0, x_1, ..., x_t`] would be: .. math:: y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} - When ``adjust=False``, the exponentially weighted function is calculated recursively: .. math:: \begin{split} y_0 &= x_0\\ y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, \end{split} ignore_na : bool, default False Ignore missing values when calculating weights; specify ``True`` to reproduce pre-0.15.0 behavior. - When ``ignore_na=False`` (default), weights are based on absolute positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. axis : {0, 1}, default 0 The axis to use. The value 0 identifies the rows, and 1 identifies the columns. times : str, np.ndarray, Series, default None .. versionadded:: 1.1.0 Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. If str, the name of the column in the DataFrame representing the times. If 1-D array like, a sequence with the same shape as the observations. Only applicable to ``mean()``. method : str {'single', 'table'}, default 'single' Execute the rolling operation per single column or row (``'single'``) or over the entire object (``'table'``). This argument is only implemented when specifying ``engine='numba'`` in the method call. Only applicable to ``mean()`` .. versionadded:: 1.4.0 Returns ------- DataFrame A Window sub-classed for the particular operation. See Also -------- rolling : Provides rolling window calculations. 
expanding : Provides expanding transformations. Notes ----- More details can be found at: :ref:`Exponentially weighted windows <window.exponentially_weighted>`. Examples -------- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) >>> df B 0 0.0 1 1.0 2 2.0 3 NaN 4 4.0 >>> df.ewm(com=0.5).mean() B 0 0.000000 1 0.750000 2 1.615385 3 1.615385 4 3.670213 Specifying ``times`` with a timedelta ``halflife`` when computing mean. >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() B 0 0.000000 1 0.585786 2 1.523889 3 1.523889 4 3.233686 Provide an exponential moving window groupby implementation. Return the diff of the times divided by the half-life. These values are used in the calculation of the ewm mean. Parameters ---------- times : str, np.ndarray, Series, default None Times corresponding to the observations. Must be monotonically increasing and ``datetime64[ns]`` dtype. halflife : float, str, timedelta, optional Half-life specifying the decay Returns ------- np.ndarray Diff of the times divided by the half-life Return an indexer class that will compute the window start and end bounds Return an indexer class that will compute the window start and end bounds Returns ------- GroupbyIndexer Calculate an online exponentially weighted mean. Parameters ---------- update: DataFrame or Series, default None New values to continue calculating the exponentially weighted mean from the last values and weights. Values should be float64 dtype. ``update`` needs to be ``None`` the first time the exponentially weighted mean is calculated. update_times: Series or 1-D np.ndarray, default None New times to continue calculating the exponentially weighted mean from the last values and weights. If ``None``, values are assumed to be evenly spaced in time. This feature is currently unsupported. Returns ------- DataFrame or Series Examples -------- >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)}) >>> online_ewm = df.head(2).ewm(0.5).online() >>> online_ewm.mean() a b 0 0.00 5.00 1 0.75 5.75 >>> online_ewm.mean(update=df.tail(3)) a b 2 1.615385 6.615385 3 2.550000 7.550000 4 3.520661 8.520661 >>> online_ewm.reset() >>> online_ewm.mean() a b 0 0.00 5.00 1 0.75 5.75 Return an ``OnlineExponentialMovingWindow`` object to calculate exponentially moving window aggregations in an online method. .. versionadded:: 1.3.0 Parameters ---------- engine: str, default ``'numba'`` Execution engine to calculate online aggregations. Applies to all supported aggregation methods. engine_kwargs : dict, default None Applies to all supported aggregation methods. * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be applied to the function Returns ------- OnlineExponentialMovingWindow Reset the state captured by `update` calls. 
noqa: PDF018 Convert to center of mass; domain checks ensure 0 < alpha <= 1 error: Item "str" of "Union[str, ndarray, NDFrameT, None]" has no attribute "view" error: Item "None" of "Union[str, ndarray, NDFrameT, None]" has no attribute "view" type: ignore[union-attr] error: Argument 1 to "len" has incompatible type "Union[str, ndarray, NDFrameT, None]"; expected "Sized" type: ignore[arg-type] Halflife is no longer applicable when calculating COM But allow COM to still be calculated if the user passes other decay args Without times, points are equally spaced error: Argument 3 to "get_center_of_mass" has incompatible type "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; expected "Optional[float]" type: ignore[arg-type] error: Argument 4 to "ewmcov" has incompatible type "Optional[int]"; expected "int" type: ignore[arg-type] sort the times and recalculate the deltas according to the groups type: ignore[union-attr]
8,027
en
0.58862
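The ``adjust`` parameter documented in the file above is easiest to understand by computing both forms by hand. A minimal sketch (assuming only numpy and pandas) that reproduces ``ewm(...).mean()`` for both settings:

# Sketch: verify the adjust=True / adjust=False EWM formulas from the
# docstring against pandas itself.
import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, 2.0, 4.0])
alpha = 0.5

def ewm_adjusted(values, alpha):
    # adjust=True: weighted average with weights (1 - alpha)**i,
    # i counted backwards from the current point.
    out = []
    for t in range(len(values)):
        w = (1 - alpha) ** np.arange(t, -1, -1)
        out.append(np.sum(w * values[: t + 1]) / np.sum(w))
    return np.array(out)

def ewm_recursive(values, alpha):
    # adjust=False: y_0 = x_0, then y_t = (1 - alpha) * y_{t-1} + alpha * x_t.
    out = [values[0]]
    for x in values[1:]:
        out.append((1 - alpha) * out[-1] + alpha * x)
    return np.array(out)

assert np.allclose(ewm_adjusted(s.to_numpy(), alpha), s.ewm(alpha=alpha).mean())
assert np.allclose(
    ewm_recursive(s.to_numpy(), alpha), s.ewm(alpha=alpha, adjust=False).mean()
)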
# -*- coding: utf-8 -*- """ Production Configurations - Use Amazon's S3 for storing static files and uploaded media - Use mailgun to send emails - Use Redis for cache """ from __future__ import absolute_import, unicode_literals from django.utils import six from .common import * # noqa # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ SECRET_KEY = env('DJANGO_SECRET_KEY') # This ensures that Django will be able to detect a secure connection # properly on Heroku. SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Use Whitenoise to serve static files # See: https://whitenoise.readthedocs.io/ WHITENOISE_MIDDLEWARE = ('whitenoise.middleware.WhiteNoiseMiddleware', ) MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE # SECURITY CONFIGURATION # ------------------------------------------------------------------------------ # See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security # and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy # set this to 60 seconds and then to 518400 when you can prove it works SECURE_HSTS_SECONDS = 60 SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool( 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True) SECURE_CONTENT_TYPE_NOSNIFF = env.bool( 'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True) SECURE_BROWSER_XSS_FILTER = True SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True) CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY' # SITE CONFIGURATION # ------------------------------------------------------------------------------ # Hosts/domain names that are valid for this site # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com']) # END SITE CONFIGURATION INSTALLED_APPS += ('gunicorn', ) # STORAGE CONFIGURATION # ------------------------------------------------------------------------------ # Uploaded Media Files # ------------------------ # See: http://django-storages.readthedocs.io/en/latest/index.html INSTALLED_APPS += ( 'storages', ) # Static Assets # ------------------------ # STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # EMAIL # ------------------------------------------------------------------------------ DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='course_portal <noreply@example.com>') EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[course_portal] ') SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL) # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: # https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # Use the Heroku-style specification # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ 
DATABASES['default'] = env.db('DATABASE_URL') # Custom Admin URL, use {% url 'admin:index' %} ADMIN_URL = env('DJANGO_ADMIN_URL') # Your production stuff: Below this line define 3rd party library settings # ------------------------------------------------------------------------------
config/settings/production.py
3,904
Production Configurations - Use Amazon's S3 for storing static files and uploaded media - Use mailgun to send emails - Use Redis for cache -*- coding: utf-8 -*- noqa SECRET CONFIGURATION ------------------------------------------------------------------------------ See: https://docs.djangoproject.com/en/dev/ref/settings/secret-key Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ This ensures that Django will be able to detect a secure connection properly on Heroku. Use Whitenoise to serve static files See: https://whitenoise.readthedocs.io/ SECURITY CONFIGURATION ------------------------------------------------------------------------------ See https://docs.djangoproject.com/en/1.9/ref/middleware/module-django.middleware.security and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/run-manage-py-check-deploy set this to 60 seconds and then to 518400 when you can prove it works SITE CONFIGURATION ------------------------------------------------------------------------------ Hosts/domain names that are valid for this site See https://docs.djangoproject.com/en/1.6/ref/settings/allowed-hosts END SITE CONFIGURATION STORAGE CONFIGURATION ------------------------------------------------------------------------------ Uploaded Media Files ------------------------ See: http://django-storages.readthedocs.io/en/latest/index.html Static Assets ------------------------ STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' EMAIL ------------------------------------------------------------------------------ TEMPLATE CONFIGURATION ------------------------------------------------------------------------------ See: https://docs.djangoproject.com/en/dev/ref/templates/api/django.template.loaders.cached.Loader DATABASE CONFIGURATION ------------------------------------------------------------------------------ Use the Heroku-style specification Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ Custom Admin URL, use {% url 'admin:index' %} Your production stuff: Below this line define 3rd party library settings ------------------------------------------------------------------------------
2,197
en
0.460336
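The settings file above leans entirely on an ``env`` helper defined in ``.common``. A minimal sketch of that pattern, assuming django-environ (which configs in this cookiecutter style typically use); the variable names mirror the ones read above:

# Sketch of the `env` helper this settings module relies on.
import environ

env = environ.Env()

# Raises ImproperlyConfigured if the variable is missing -- the behavior
# the SECRET_KEY comment describes.
SECRET_KEY = env('DJANGO_SECRET_KEY')

# Typed reads with defaults, as used for the security flags.
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])

# env.db() parses a URL like postgres://user:pass@host:5432/name into
# Django's DATABASES dict format.
DATABASES = {'default': env.db('DATABASE_URL')}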
import torch from mmcv.cnn import ConvModule from torch import nn as nn from mmdet3d.models.builder import build_loss class VoteModule(nn.Module): """Vote module. Generate votes from seed point features. Args: in_channels (int): Number of channels of seed point features. vote_per_seed (int): Number of votes generated from each seed point. gt_per_seed (int): Number of ground truth votes generated from each seed point. conv_channels (tuple[int]): Out channels of vote generating convolution. conv_cfg (dict): Config of convolution. Default: dict(type='Conv1d'). norm_cfg (dict): Config of normalization. Default: dict(type='BN1d'). norm_feats (bool): Whether to normalize features. Default: True. vote_loss (dict): Config of vote loss. """ def __init__(self, in_channels, vote_per_seed=1, gt_per_seed=3, conv_channels=(16, 16), conv_cfg=dict(type='Conv1d'), norm_cfg=dict(type='BN1d'), norm_feats=True, vote_loss=None): super().__init__() self.in_channels = in_channels self.vote_per_seed = vote_per_seed self.gt_per_seed = gt_per_seed self.norm_feats = norm_feats self.vote_loss = build_loss(vote_loss) prev_channels = in_channels vote_conv_list = list() for k in range(len(conv_channels)): vote_conv_list.append( ConvModule( prev_channels, conv_channels[k], 1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True, inplace=True)) prev_channels = conv_channels[k] self.vote_conv = nn.Sequential(*vote_conv_list) # conv_out predicts coordinate and residual features out_channel = (3 + in_channels) * self.vote_per_seed self.conv_out = nn.Conv1d(prev_channels, out_channel, 1) def forward(self, seed_points, seed_feats): """forward. Args: seed_points (torch.Tensor): Coordinate of the seed points in shape (B, N, 3). seed_feats (torch.Tensor): Features of the seed points in shape (B, C, N). Returns: tuple[torch.Tensor]: - vote_points: Voted xyz based on the seed points \ with shape (B, M, 3), ``M=num_seed*vote_per_seed``. - vote_features: Voted features based on the seed points with \ shape (B, C, M) where ``M=num_seed*vote_per_seed``, \ ``C=vote_feature_dim``. """ batch_size, feat_channels, num_seed = seed_feats.shape num_vote = num_seed * self.vote_per_seed x = self.vote_conv(seed_feats) # (batch_size, (3+out_dim)*vote_per_seed, num_seed) votes = self.conv_out(x) votes = votes.transpose(2, 1).view(batch_size, num_seed, self.vote_per_seed, -1) offset = votes[:, :, :, 0:3] res_feats = votes[:, :, :, 3:] vote_points = (seed_points.unsqueeze(2) + offset).contiguous() vote_points = vote_points.view(batch_size, num_vote, 3) vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + res_feats).contiguous() vote_feats = vote_feats.view(batch_size, num_vote, feat_channels).transpose(2, 1).contiguous() if self.norm_feats: features_norm = torch.norm(vote_feats, p=2, dim=1) vote_feats = vote_feats.div(features_norm.unsqueeze(1)) return vote_points, vote_feats def get_loss(self, seed_points, vote_points, seed_indices, vote_targets_mask, vote_targets): """Calculate loss of voting module. Args: seed_points (torch.Tensor): Coordinate of the seed points. vote_points (torch.Tensor): Coordinate of the vote points. seed_indices (torch.Tensor): Indices of seed points in raw points. vote_targets_mask (torch.Tensor): Mask of valid vote targets. vote_targets (torch.Tensor): Targets of votes. Returns: torch.Tensor: Weighted vote loss. 
""" batch_size, num_seed = seed_points.shape[:2] seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, seed_indices).float() seed_indices_expand = seed_indices.unsqueeze(-1).repeat( 1, 1, 3 * self.gt_per_seed) seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand) seed_gt_votes += seed_points.repeat(1, 1, 3) weight = seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-6) distance = self.vote_loss( vote_points.view(batch_size * num_seed, -1, 3), seed_gt_votes.view(batch_size * num_seed, -1, 3), dst_weight=weight.view(batch_size * num_seed, 1))[1] vote_loss = torch.sum(torch.min(distance, dim=1)[0]) return vote_loss
mmdet3d/models/model_utils/vote_module.py
5,378
Vote module. Generate votes from seed point features. Args: in_channels (int): Number of channels of seed point features. vote_per_seed (int): Number of votes generated from each seed point. gt_per_seed (int): Number of ground truth votes generated from each seed point. conv_channels (tuple[int]): Out channels of vote generating convolution. conv_cfg (dict): Config of convolution. Default: dict(type='Conv1d'). norm_cfg (dict): Config of normalization. Default: dict(type='BN1d'). norm_feats (bool): Whether to normalize features. Default: True. vote_loss (dict): Config of vote loss. forward. Args: seed_points (torch.Tensor): Coordinate of the seed points in shape (B, N, 3). seed_feats (torch.Tensor): Features of the seed points in shape (B, C, N). Returns: tuple[torch.Tensor]: - vote_points: Voted xyz based on the seed points with shape (B, M, 3), ``M=num_seed*vote_per_seed``. - vote_features: Voted features based on the seed points with shape (B, C, M) where ``M=num_seed*vote_per_seed``, ``C=vote_feature_dim``. Calculate loss of voting module. Args: seed_points (torch.Tensor): Coordinate of the seed points. vote_points (torch.Tensor): Coordinate of the vote points. seed_indices (torch.Tensor): Indices of seed points in raw points. vote_targets_mask (torch.Tensor): Mask of valid vote targets. vote_targets (torch.Tensor): Targets of votes. Returns: torch.Tensor: Weighted vote loss. conv_out predicts coordinate and residual features (batch_size, (3+out_dim)*vote_per_seed, num_seed)
1,711
en
0.76329
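A quick shape check for ``VoteModule.forward`` on random data clarifies the ``(B, N, 3)`` / ``(B, C, N)`` contract in the docstring. This sketch assumes mmdet3d is installed; the ChamferDistance config is an illustrative guess at typical VoteNet settings, not taken from this file:

# Sketch: drive VoteModule.forward with dummy tensors.
import torch
from mmdet3d.models.model_utils.vote_module import VoteModule

vote_module = VoteModule(
    in_channels=256,
    vote_per_seed=1,
    gt_per_seed=3,
    conv_channels=(256, 256),
    vote_loss=dict(
        type='ChamferDistance', mode='l1', reduction='none',
        loss_dst_weight=10.0))  # assumed config, mirrors common VoteNet setups

seed_points = torch.rand(2, 1024, 3)   # (B, N, 3)
seed_feats = torch.rand(2, 256, 1024)  # (B, C, N)
vote_points, vote_feats = vote_module(seed_points, seed_feats)
# With vote_per_seed=1: vote_points is (2, 1024, 3), vote_feats is (2, 256, 1024).
print(vote_points.shape, vote_feats.shape)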
"""Import/export any formats supported by meshio.""" import meshio import numpy as np import skfem MESH_TYPE_MAPPING = { 'tetra': skfem.MeshTet1, 'tetra10': skfem.MeshTet2, 'hexahedron': skfem.MeshHex1, 'hexahedron27': skfem.MeshHex2, 'wedge': skfem.MeshWedge1, 'triangle': skfem.MeshTri1, 'triangle6': skfem.MeshTri2, 'quad': skfem.MeshQuad1, 'quad9': skfem.MeshQuad2, 'line': skfem.MeshLine1, } BOUNDARY_TYPE_MAPPING = { 'line': 'vertex', 'triangle': 'line', 'quad': 'line', 'tetra': 'triangle', 'hexahedron': 'quad', 'tetra10': 'triangle', # TODO support quadratic facets 'triangle6': 'line', # TODO 'quad9': 'line', # TODO 'hexahedron27': 'quad', # TODO } TYPE_MESH_MAPPING = {MESH_TYPE_MAPPING[k]: k for k in dict(reversed(list(MESH_TYPE_MAPPING.items())))} HEX_MAPPING = [0, 3, 6, 2, 1, 5, 7, 4, 10, 16, 14, 9, 12, 18, 17, 11, 8, 15, 19, 13, 20, 25, 22, 23, 21, 24, 26] INV_HEX_MAPPING = [HEX_MAPPING.index(i) for i in range(len(HEX_MAPPING))] def from_meshio(m, out=None, int_data_to_sets=False, force_meshio_type=None): cells = m.cells_dict meshio_type = None if force_meshio_type is None: # detect 3D for k in cells: if k in {'tetra', 'hexahedron', 'tetra10', 'hexahedron27', 'wedge'}: meshio_type = k break if meshio_type is None: # detect 2D for k in cells: if k in {'triangle', 'quad', 'triangle6', 'quad9'}: meshio_type = k break if meshio_type is None: # detect 1D for k in cells: if k == 'line': meshio_type = k break else: meshio_type = force_meshio_type if meshio_type is None: raise NotImplementedError("Mesh type(s) not supported " "in import: {}.".format(cells.keys())) mesh_type = MESH_TYPE_MAPPING[meshio_type] # create p and t p = np.ascontiguousarray(mesh_type.strip_extra_coordinates(m.points).T) t = np.ascontiguousarray(cells[meshio_type].T) # reorder t if needed if meshio_type == 'hexahedron': t = t[INV_HEX_MAPPING[:8]] elif meshio_type == 'hexahedron27': t = t[INV_HEX_MAPPING] if int_data_to_sets: m.int_data_to_sets() subdomains = {} boundaries = {} # parse any subdomains from cell_sets if m.cell_sets: subdomains = {k: v[meshio_type] for k, v in m.cell_sets_dict.items() if meshio_type in v} # create temporary mesh for matching boundary elements mtmp = mesh_type(p, t) bnd_type = BOUNDARY_TYPE_MAPPING[meshio_type] # parse boundaries from cell_sets if m.cell_sets and bnd_type in m.cells_dict: facets = { k: [tuple(f) for f in np.sort(m.cells_dict[bnd_type][v[bnd_type]])] for k, v in m.cell_sets_dict.items() if bnd_type in v and k.split(":")[0] != "gmsh" } boundaries = {k: np.array([i for i, f in enumerate(map(tuple, mtmp.facets.T)) if f in v]) for k, v in facets.items()} # MSH 2.2 tag parsing if m.cell_data and m.field_data: try: elements_tag = m.cell_data_dict['gmsh:physical'][meshio_type] subdomains = {} tags = np.unique(elements_tag) def find_tagname(tag): for key in m.field_data: if m.field_data[key][0] == tag: return key return None for tag in tags: t_set = np.nonzero(tag == elements_tag)[0] subdomains[find_tagname(tag)] = t_set # find tagged boundaries if bnd_type in m.cell_data_dict['gmsh:physical']: facets = m.cells_dict[bnd_type] facets_tag = m.cell_data_dict['gmsh:physical'][bnd_type] # put meshio facets to dict dic = {tuple(np.sort(facets[i])): facets_tag[i] for i in range(facets.shape[0])} # get index of corresponding Mesh.facets for each meshio # facet found in the dict index = np.array([[dic[tuple(np.sort(mtmp.facets[:, i]))], i] for i in mtmp.boundary_facets() if tuple(np.sort(mtmp.facets[:, i])) in dic]) # read meshio tag numbers and names tags = index[:, 0] boundaries = {} for 
tag in np.unique(tags): tagindex = np.nonzero(tags == tag)[0] boundaries[find_tagname(tag)] = index[tagindex, 1] except Exception: pass # attempt parsing skfem tags if m.cell_data: _boundaries, _subdomains = mtmp._decode_cell_data(m.cell_data) boundaries.update(_boundaries) subdomains.update(_subdomains) # export mesh data if out is not None and isinstance(out, list): for i, field in enumerate(out): out[i] = getattr(m, field) return mesh_type( p, t, None if len(boundaries) == 0 else boundaries, None if len(subdomains) == 0 else subdomains, ) def from_file(filename, out, **kwargs): return from_meshio(meshio.read(filename), out, **kwargs) def to_meshio(mesh, point_data=None, cell_data=None, encode_cell_data=True, encode_point_data=False): t = mesh.dofs.element_dofs.copy() if isinstance(mesh, skfem.MeshHex2): t = t[HEX_MAPPING] elif isinstance(mesh, skfem.MeshHex): t = t[HEX_MAPPING[:8]] mtype = TYPE_MESH_MAPPING[type(mesh)] cells = {mtype: t.T} if encode_cell_data: if cell_data is None: cell_data = {} cell_data.update(mesh._encode_cell_data()) if encode_point_data: if point_data is None: point_data = {} point_data.update(mesh._encode_point_data()) mio = meshio.Mesh( mesh.p.T, cells, point_data=point_data, cell_data=cell_data, ) return mio def to_file(mesh, filename, point_data=None, cell_data=None, encode_cell_data=True, encode_point_data=False, **kwargs): meshio.write(filename, to_meshio(mesh, point_data, cell_data, encode_cell_data, encode_point_data), **kwargs)
skfem/io/meshio.py
7,062
Import/export any formats supported by meshio. TODO support quadratic facets TODO TODO TODO detect 3D detect 2D detect 1D create p and t reorder t if needed parse any subdomains from cell_sets create temporary mesh for matching boundary elements parse boundaries from cell_sets MSH 2.2 tag parsing find tagged boundaries put meshio facets to dict get index of corresponding Mesh.facets for each meshio facet found in the dict read meshio tag numbers and names attempt parsing skfem tags export mesh data
505
en
0.631504
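A round trip through the two converters above is the quickest way to see how they fit together. A minimal sketch, assuming scikit-fem and meshio are installed and that ``MeshTri()`` builds its default unit-square triangulation:

# Sketch: skfem -> meshio -> skfem round trip.
import skfem
from skfem.io.meshio import from_meshio, to_meshio

m = skfem.MeshTri()        # maps to the 'triangle' meshio cell type
mio = to_meshio(m)         # skfem mesh -> meshio.Mesh
m2 = from_meshio(mio)      # meshio.Mesh -> skfem mesh

assert type(m2) is skfem.MeshTri1  # MESH_TYPE_MAPPING['triangle']
assert m2.p.shape == m.p.shape and m2.t.shape == m.t.shape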
from __future__ import annotations from abc import abstractmethod, ABC from decimal import Decimal from enum import Enum from typing import Dict, cast import numpy as np # A few extra general types from slim.simulation.lice_population import LicePopulation, GenoDistrib, GenoTreatmentValue,\ Alleles, GenoTreatmentDistrib Money = Decimal class Treatment(Enum): """ A stub for treatment types TODO: add other treatments here """ EMB = 0 THERMOLICER = 1 class GeneticMechanism(Enum): """ Genetic mechanism to be used when generating egg genotypes """ DISCRETE = 1 MATERNAL = 2 class HeterozygousResistance(Enum): """ Resistance in a monogenic, heterozygous setting. """ DOMINANT = 1 INCOMPLETELY_DOMINANT = 2 RECESSIVE = 3 TreatmentResistance = Dict[HeterozygousResistance, float] class TreatmentParams(ABC): """ Abstract class for all the treatments """ name = "" def __init__(self, payload): self.quadratic_fish_mortality_coeffs = np.array(payload["quadratic_fish_mortality_coeffs"]) self.effect_delay: int = payload["effect_delay"] self.application_period: int = payload["application_period"] @staticmethod def parse_pheno_resistance(pheno_resistance_dict: dict) -> TreatmentResistance: return {HeterozygousResistance[key.upper()]: val for key, val in pheno_resistance_dict.items()} def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float: """Get the mortality percentage point difference increase. :param temperature: the temperature in Celsius :param fish_mass: the fish mass (in grams) :returns: Mortality percentage point difference increase """ # TODO: is this the right way to solve this? fish_mass_indicator = 1 if fish_mass > 2000 else 0 input = np.array([1, temperature, fish_mass_indicator, temperature ** 2, temperature * fish_mass_indicator, fish_mass_indicator ** 2]) return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0) @abstractmethod def delay(self, average_temperature: float): # pragma: no cover """ Delay before treatment should have a noticeable effect """ @staticmethod def get_allele_heterozygous_trait(alleles: Alleles): """ Get the allele heterozygous type """ # should we move this? 
if 'A' in alleles: if 'a' in alleles: trait = HeterozygousResistance.INCOMPLETELY_DOMINANT else: trait = HeterozygousResistance.DOMINANT else: trait = HeterozygousResistance.RECESSIVE return trait @abstractmethod def get_lice_treatment_mortality_rate( self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib: """ Calculate the mortality rates of this treatment """ def get_fish_mortality_occurrences( self, temperature: float, fish_mass: float, num_fish: float, efficacy_window: float, mortality_events: int ): """Get the number of fish that die due to treatment :param temperature: the temperature of the cage :param num_fish: the number of fish :param fish_mass: the average fish mass (in grams) :param efficacy_window: the length of the efficacy window :param mortality_events: the number of fish mortality events to subtract from """ predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass) mortality_events_pp = 100 * mortality_events / num_fish predicted_deaths = ((predicted_pp_increase + mortality_events_pp) * num_fish / 100) \ - mortality_events predicted_deaths /= efficacy_window return predicted_deaths class ChemicalTreatment(TreatmentParams): """Trait for all chemical treatments""" def __init__(self, payload): super().__init__(payload) self.pheno_resistance = self.parse_pheno_resistance(payload["pheno_resistance"]) self.price_per_kg = Money(payload["price_per_kg"]) self.durability_temp_ratio: float = payload["durability_temp_ratio"] class ThermalTreatment(TreatmentParams): """Trait for all thermal-based treatments""" def __init__(self, payload): super().__init__(payload) self.price_per_application = Money(payload["price_per_application"]) # NOTE: these are currently unused # self.exposure_temperature: float = payload["exposure_temperature"] # self.exposure_length: float = payload["efficacy"] class EMB(ChemicalTreatment): """Emamectin Benzoate""" name = "EMB" def delay(self, average_temperature: float): return self.durability_temp_ratio / average_temperature def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, _temperature=None): susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in LicePopulation.susceptible_stages] num_susc_per_geno = GenoDistrib.batch_sum(susceptible_populations) geno_treatment_distrib = {geno: GenoTreatmentValue(0.0, 0) for geno in num_susc_per_geno} for geno, num_susc in num_susc_per_geno.items(): trait = self.get_allele_heterozygous_trait(geno) susceptibility_factor = 1.0 - self.pheno_resistance[trait] geno_treatment_distrib[geno] = GenoTreatmentValue(susceptibility_factor, cast(int, num_susc)) return geno_treatment_distrib class Thermolicer(ThermalTreatment): name = "Thermolicer" def delay(self, _): return 1 # effects noticeable the next day def get_lice_treatment_mortality_rate( self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib: if temperature >= 12: efficacy = 0.8 else: efficacy = 0.99 susceptible_populations = [lice_population.geno_by_lifestage[stage] for stage in LicePopulation.susceptible_stages] num_susc_per_geno = cast(GenoDistrib, GenoDistrib.batch_sum(susceptible_populations)) geno_treatment_distrib = {geno: GenoTreatmentValue(efficacy, cast(int, num_susc)) for geno, num_susc in num_susc_per_geno.items()} return geno_treatment_distrib
slim/types/TreatmentTypes.py
6,598
Trait for all chemical treatments Emamectin Benzoate Genetic mechanism to be used when generating egg genotypes Resistance in a monogenic, heterozygous setting. Trait for all thermal-based treatments A stub for treatment types TODO: add other treatments here Abstract class for all the treatments Get the mortality percentage point difference increase. :param temperature: the temperature in Celsius :param fish_mass: the fish mass (in grams) :returns: Mortality percentage point difference increase Delay before treatment should have a noticeable effect Get the allele heterozygous type Get the number of fish that die due to treatment :param temperature: the temperature of the cage :param num_fish: the number of fish :param fish_mass: the average fish mass (in grams) :param efficacy_window: the length of the efficacy window :param mortality_events: the number of fish mortality events to subtract from Calculate the mortality rates of this treatment A few extra general types TODO: is this the right way to solve this? pragma: no cover should we move this? NOTE: these are currently unused self.exposure_temperature: float = payload["exposure_temperature"] self.exposure_length: float = payload["efficacy"] effects noticeable the next day
1,248
en
0.841336
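The quadratic fish-mortality model in ``__get_mortality_pp_increase`` is just a dot product over the feature vector ``[1, T, I, T**2, T*I, I**2]``, where ``I = 1`` if the fish weigh more than 2000 g and 0 otherwise. A standalone sketch with made-up coefficients:

# Sketch of the quadratic mortality model; coefficients are illustrative only.
import numpy as np

coeffs = np.array([0.5, -0.1, 1.0, 0.01, 0.05, 0.0])  # made-up values
temperature = 12.0  # Celsius
fish_mass = 2500.0  # grams

indicator = 1 if fish_mass > 2000 else 0
features = np.array([
    1,
    temperature,
    indicator,
    temperature ** 2,
    temperature * indicator,
    indicator ** 2,
])
pp_increase = max(float(coeffs.dot(features)), 0)  # clamped at zero, as in the class
print(pp_increase)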
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.logger._json}. """ from io import BytesIO, StringIO from typing import IO, Any, List, Optional, Sequence, cast from zope.interface import implementer from zope.interface.exceptions import BrokenMethodImplementation from zope.interface.verify import verifyObject from twisted.python.failure import Failure from twisted.trial.unittest import TestCase from .._flatten import extractField from .._format import formatEvent from .._global import globalLogPublisher from .._interfaces import ILogObserver, LogEvent from .._json import ( eventAsJSON, eventFromJSON, eventsFromJSONLogFile, jsonFileLogObserver, log as jsonLog, ) from .._levels import LogLevel from .._logger import Logger from .._observer import LogPublisher def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str: """ Assert a few things about the result of L{eventAsJSON}, then return it. @param testCase: The L{TestCase} with which to perform the assertions. @param savedJSON: The result of L{eventAsJSON}. @return: C{savedJSON} @raise AssertionError: If any of the preconditions fail. """ testCase.assertIsInstance(savedJSON, str) testCase.assertEqual(savedJSON.count("\n"), 0) return savedJSON class SaveLoadTests(TestCase): """ Tests for loading and saving log events. """ def savedEventJSON(self, event: LogEvent) -> str: """ Serialize some an events, assert some things about it, and return the JSON. @param event: An event. @return: JSON. """ return savedJSONInvariants(self, eventAsJSON(event)) def test_simpleSaveLoad(self) -> None: """ Saving and loading an empty dictionary results in an empty dictionary. """ self.assertEqual(eventFromJSON(self.savedEventJSON({})), {}) def test_saveLoad(self) -> None: """ Saving and loading a dictionary with some simple values in it results in those same simple values in the output; according to JSON's rules, though, all dictionary keys must be L{str} and any non-L{str} keys will be converted. """ self.assertEqual( eventFromJSON(self.savedEventJSON({1: 2, "3": "4"})), # type: ignore[dict-item] {"1": 2, "3": "4"}, ) def test_saveUnPersistable(self) -> None: """ Saving and loading an object which cannot be represented in JSON will result in a placeholder. """ self.assertEqual( eventFromJSON(self.savedEventJSON({"1": 2, "3": object()})), {"1": 2, "3": {"unpersistable": True}}, ) def test_saveNonASCII(self) -> None: """ Non-ASCII keys and values can be saved and loaded. """ self.assertEqual( eventFromJSON(self.savedEventJSON({"\u1234": "\u4321", "3": object()})), {"\u1234": "\u4321", "3": {"unpersistable": True}}, ) def test_saveBytes(self) -> None: """ Any L{bytes} objects will be saved as if they are latin-1 so they can be faithfully re-loaded. """ inputEvent = {"hello": bytes(range(255))} # On Python 3, bytes keys will be skipped by the JSON encoder. Not # much we can do about that. Let's make sure that we don't get an # error, though. inputEvent.update({b"skipped": "okay"}) # type: ignore[dict-item] self.assertEqual( eventFromJSON(self.savedEventJSON(inputEvent)), {"hello": bytes(range(255)).decode("charmap")}, ) def test_saveUnPersistableThenFormat(self) -> None: """ Saving and loading an object which cannot be represented in JSON, but has a string representation which I{can} be saved as JSON, will result in the same string formatting; any extractable fields will retain their data types. 
""" class Reprable: def __init__(self, value: object) -> None: self.value = value def __repr__(self) -> str: return "reprable" inputEvent = {"log_format": "{object} {object.value}", "object": Reprable(7)} outputEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertEqual(formatEvent(outputEvent), "reprable 7") def test_extractingFieldsPostLoad(self) -> None: """ L{extractField} can extract fields from an object that's been saved and loaded from JSON. """ class Obj: def __init__(self) -> None: self.value = 345 inputEvent = dict(log_format="{object.value}", object=Obj()) loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertEqual(extractField("object.value", loadedEvent), 345) # The behavior of extractField is consistent between pre-persistence # and post-persistence events, although looking up the key directly # won't be: self.assertRaises(KeyError, extractField, "object", loadedEvent) self.assertRaises(KeyError, extractField, "object", inputEvent) def test_failureStructurePreserved(self) -> None: """ Round-tripping a failure through L{eventAsJSON} preserves its class and structure. """ events: List[LogEvent] = [] log = Logger(observer=cast(ILogObserver, events.append)) try: 1 / 0 except ZeroDivisionError: f = Failure() log.failure("a message about failure", f) self.assertEqual(len(events), 1) loaded = eventFromJSON(self.savedEventJSON(events[0]))["log_failure"] self.assertIsInstance(loaded, Failure) self.assertTrue(loaded.check(ZeroDivisionError)) self.assertIsInstance(loaded.getTraceback(), str) def test_saveLoadLevel(self) -> None: """ It's important that the C{log_level} key remain a L{constantly.NamedConstant} object. """ inputEvent = dict(log_level=LogLevel.warn) loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent)) self.assertIs(loadedEvent["log_level"], LogLevel.warn) def test_saveLoadUnknownLevel(self) -> None: """ If a saved bit of JSON (let's say, from a future version of Twisted) were to persist a different log_level, it will resolve as None. """ loadedEvent = eventFromJSON( '{"log_level": {"name": "other", ' '"__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}' ) self.assertEqual(loadedEvent, dict(log_level=None)) class FileLogObserverTests(TestCase): """ Tests for L{jsonFileLogObserver}. """ def test_interface(self) -> None: """ A L{FileLogObserver} returned by L{jsonFileLogObserver} is an L{ILogObserver}. """ with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle) try: verifyObject(ILogObserver, observer) except BrokenMethodImplementation as e: self.fail(e) def assertObserverWritesJSON(self, recordSeparator: str = "\x1e") -> None: """ Asserts that an observer created by L{jsonFileLogObserver} with the given arguments writes events serialized as JSON text, using the given record separator. @param recordSeparator: C{recordSeparator} argument to L{jsonFileLogObserver} """ with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle, recordSeparator) event = dict(x=1) observer(event) self.assertEqual(fileHandle.getvalue(), f'{recordSeparator}{{"x": 1}}\n') def test_observeWritesDefaultRecordSeparator(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. By default, the record separator is C{"\\x1e"}. 
""" self.assertObserverWritesJSON() def test_observeWritesEmptyRecordSeparator(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. This test sets the record separator to C{""}. """ self.assertObserverWritesJSON(recordSeparator="") def test_failureFormatting(self) -> None: """ A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures serialized as JSON text to a file when it observes events. """ io = StringIO() publisher = LogPublisher() logged: List[LogEvent] = [] publisher.addObserver(cast(ILogObserver, logged.append)) publisher.addObserver(jsonFileLogObserver(io)) logger = Logger(observer=publisher) try: 1 / 0 except BaseException: logger.failure("failed as expected") reader = StringIO(io.getvalue()) deserialized = list(eventsFromJSONLogFile(reader)) def checkEvents(logEvents: Sequence[LogEvent]) -> None: self.assertEqual(len(logEvents), 1) [failureEvent] = logEvents self.assertIn("log_failure", failureEvent) failureObject = failureEvent["log_failure"] self.assertIsInstance(failureObject, Failure) tracebackObject = failureObject.getTracebackObject() self.assertEqual( tracebackObject.tb_frame.f_code.co_filename.rstrip("co"), __file__.rstrip("co"), ) checkEvents(logged) checkEvents(deserialized) class LogFileReaderTests(TestCase): """ Tests for L{eventsFromJSONLogFile}. """ def setUp(self) -> None: self.errorEvents: List[LogEvent] = [] @implementer(ILogObserver) def observer(event: LogEvent) -> None: if event["log_namespace"] == jsonLog.namespace and "record" in event: self.errorEvents.append(event) self.logObserver = observer globalLogPublisher.addObserver(observer) def tearDown(self) -> None: globalLogPublisher.removeObserver(self.logObserver) def _readEvents( self, inFile: IO[Any], recordSeparator: Optional[str] = None, bufferSize: int = 4096, ) -> None: """ Test that L{eventsFromJSONLogFile} reads two pre-defined events from a file: C{{"x": 1}} and C{{"y": 2}}. @param inFile: C{inFile} argument to L{eventsFromJSONLogFile} @param recordSeparator: C{recordSeparator} argument to L{eventsFromJSONLogFile} @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile} """ events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize)) self.assertEqual(next(events), {"x": 1}) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events def test_readEventsAutoWithRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{"\\x1e"} as the record separator. """ with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle: self._readEvents(fileHandle) self.assertEqual(len(self.errorEvents), 0) def test_readEventsAutoEmptyRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{""} as the record separator. """ with StringIO('{"x": 1}\n' '{"y": 2}\n') as fileHandle: self._readEvents(fileHandle) self.assertEqual(len(self.errorEvents), 0) def test_readEventsExplicitRecordSeparator(self) -> None: """ L{eventsFromJSONLogFile} reads events from a file and is told to use a specific record separator. """ # Use "\x08" (backspace)... because that seems weird enough. 
with StringIO('\x08{"x": 1}\n' '\x08{"y": 2}\n') as fileHandle: self._readEvents(fileHandle, recordSeparator="\x08") self.assertEqual(len(self.errorEvents), 0) def test_readEventsPartialBuffer(self) -> None: """ L{eventsFromJSONLogFile} handles buffering a partial event. """ with StringIO('\x1e{"x": 1}\n' '\x1e{"y": 2}\n') as fileHandle: # Use a buffer size smaller than the event text. self._readEvents(fileHandle, bufferSize=1) self.assertEqual(len(self.errorEvents), 0) def test_readTruncated(self) -> None: """ If the JSON text for a record is truncated, skip it. """ with StringIO('\x1e{"x": 1' '\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read truncated JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1') def test_readUnicode(self) -> None: """ If the file being read from vends L{str}, strings decode from JSON as-is. """ # The Euro currency sign is "\u20ac" with StringIO('\x1e{"currency": "\u20ac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"currency": "\u20ac"}) self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readUTF8Bytes(self) -> None: """ If the file being read from vends L{bytes}, strings decode from JSON as UTF-8. """ # The Euro currency sign is b"\xe2\x82\xac" in UTF-8 with BytesIO(b'\x1e{"currency": "\xe2\x82\xac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) # The Euro currency sign is "\u20ac" self.assertEqual(next(events), {"currency": "\u20ac"}) self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readTruncatedUTF8Bytes(self) -> None: """ If the JSON text for a record is truncated in the middle of a two-byte Unicode codepoint, we don't want to see a codec exception and the stream is read properly when the additional data arrives. """ # The Euro currency sign is "\u20ac" and encodes in UTF-8 as three # bytes: b"\xe2\x82\xac". with BytesIO(b'\x1e{"x": "\xe2\x82\xac"}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle, bufferSize=8)) self.assertEqual(next(events), {"x": "\u20ac"}) # Got text self.assertRaises(StopIteration, next, events) # No more events self.assertEqual(len(self.errorEvents), 0) def test_readInvalidUTF8Bytes(self) -> None: """ If the JSON text for a record contains invalid UTF-8 text, ignore that record. """ # The string b"\xe2\xac" is bogus with BytesIO(b'\x1e{"x": "\xe2\xac"}\n' b'\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to decode UTF-8 for JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": "\xe2\xac"}\n') def test_readInvalidJSON(self) -> None: """ If the JSON text for a record is invalid, skip it. 
""" with StringIO('\x1e{"x": }\n' '\x1e{"y": 2}\n') as fileHandle: events = iter(eventsFromJSONLogFile(fileHandle)) self.assertEqual(next(events), {"y": 2}) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": }\n') def test_readUnseparated(self) -> None: """ Multiple events without a record separator are skipped. """ with StringIO('\x1e{"x": 1}\n' '{"y": 2}\n') as fileHandle: events = eventsFromJSONLogFile(fileHandle) self.assertRaises(StopIteration, next, events) # No more events # We should have logged the lost record self.assertEqual(len(self.errorEvents), 1) self.assertEqual( self.errorEvents[0]["log_format"], "Unable to read JSON record: {record!r}", ) self.assertEqual(self.errorEvents[0]["record"], b'{"x": 1}\n{"y": 2}\n') def test_roundTrip(self) -> None: """ Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver} and read by L{eventsFromJSONLogFile} is reconstructed properly. """ event = dict(x=1) with StringIO() as fileHandle: observer = jsonFileLogObserver(fileHandle) observer(event) fileHandle.seek(0) events = eventsFromJSONLogFile(fileHandle) self.assertEqual(tuple(events), (event,)) self.assertEqual(len(self.errorEvents), 0)
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
18,274
Tests for L{jsonFileLogObserver}. Tests for L{eventsFromJSONLogFile}. Tests for loading and saving log events. Test that L{eventsFromJSONLogFile} reads two pre-defined events from a file: C{{"x": 1}} and C{{"y": 2}}. @param inFile: C{inFile} argument to L{eventsFromJSONLogFile} @param recordSeparator: C{recordSeparator} argument to L{eventsFromJSONLogFile} @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile} Asserts that an observer created by L{jsonFileLogObserver} with the given arguments writes events serialized as JSON text, using the given record separator. @param recordSeparator: C{recordSeparator} argument to L{jsonFileLogObserver} Serialize some an events, assert some things about it, and return the JSON. @param event: An event. @return: JSON. Assert a few things about the result of L{eventAsJSON}, then return it. @param testCase: The L{TestCase} with which to perform the assertions. @param savedJSON: The result of L{eventAsJSON}. @return: C{savedJSON} @raise AssertionError: If any of the preconditions fail. L{extractField} can extract fields from an object that's been saved and loaded from JSON. A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures serialized as JSON text to a file when it observes events. Round-tripping a failure through L{eventAsJSON} preserves its class and structure. A L{FileLogObserver} returned by L{jsonFileLogObserver} is an L{ILogObserver}. A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. By default, the record separator is C{"\x1e"}. A L{FileLogObserver} created by L{jsonFileLogObserver} writes events serialzed as JSON text to a file when it observes events. This test sets the record separator to C{""}. L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{""} as the record separator. L{eventsFromJSONLogFile} reads events from a file and automatically detects use of C{"\x1e"} as the record separator. L{eventsFromJSONLogFile} reads events from a file and is told to use a specific record separator. L{eventsFromJSONLogFile} handles buffering a partial event. If the JSON text for a record is invalid, skip it. If the JSON text for a record contains invalid UTF-8 text, ignore that record. If the JSON text for a record is truncated, skip it. If the JSON text for a record is truncated in the middle of a two-byte Unicode codepoint, we don't want to see a codec exception and the stream is read properly when the additional data arrives. If the file being read from vends L{bytes}, strings decode from JSON as UTF-8. If the file being read from vends L{str}, strings decode from JSON as-is. Multiple events without a record separator are skipped. Data written by a L{FileLogObserver} returned by L{jsonFileLogObserver} and read by L{eventsFromJSONLogFile} is reconstructed properly. Any L{bytes} objects will be saved as if they are latin-1 so they can be faithfully re-loaded. Saving and loading a dictionary with some simple values in it results in those same simple values in the output; according to JSON's rules, though, all dictionary keys must be L{str} and any non-L{str} keys will be converted. It's important that the C{log_level} key remain a L{constantly.NamedConstant} object. If a saved bit of JSON (let's say, from a future version of Twisted) were to persist a different log_level, it will resolve as None. Non-ASCII keys and values can be saved and loaded. 
Saving and loading an object which cannot be represented in JSON will result in a placeholder. Saving and loading an object which cannot be represented in JSON, but has a string representation which I{can} be saved as JSON, will result in the same string formatting; any extractable fields will retain their data types. Saving and loading an empty dictionary results in an empty dictionary. Tests for L{twisted.logger._json}. Copyright (c) Twisted Matrix Laboratories. See LICENSE for details. type: ignore[dict-item] On Python 3, bytes keys will be skipped by the JSON encoder. Not much we can do about that. Let's make sure that we don't get an error, though. type: ignore[dict-item] The behavior of extractField is consistent between pre-persistence and post-persistence events, although looking up the key directly won't be: No more events Use "\x08" (backspace)... because that seems weird enough. Use a buffer size smaller than the event text. No more events We should have logged the lost record The Euro currency sign is "\u20ac" No more events The Euro currency sign is b"\xe2\x82\xac" in UTF-8 The Euro currency sign is "\u20ac" No more events The Euro currency sign is "\u20ac" and encodes in UTF-8 as three bytes: b"\xe2\x82\xac". Got text No more events The string b"\xe2\xac" is bogus No more events We should have logged the lost record No more events We should have logged the lost record No more events We should have logged the lost record
4,957
en
0.860309
# -*- coding: utf-8 -*- ############################################################################### # # RetrieveCoupon # Retrieves a coupon with specified coupon id. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class RetrieveCoupon(Choreography): def __init__(self, temboo_session): """ Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(RetrieveCoupon, self).__init__(temboo_session, '/Library/Stripe/Coupons/RetrieveCoupon') def new_input_set(self): return RetrieveCouponInputSet() def _make_result_set(self, result, path): return RetrieveCouponResultSet(result, path) def _make_execution(self, session, exec_id, path): return RetrieveCouponChoreographyExecution(session, exec_id, path) class RetrieveCouponInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the RetrieveCoupon Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe) """ super(RetrieveCouponInputSet, self)._set_input('APIKey', value) def set_CouponID(self, value): """ Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve) """ super(RetrieveCouponInputSet, self)._set_input('CouponID', value) class RetrieveCouponResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the RetrieveCoupon Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe) """ return self._output.get('Response', None) class RetrieveCouponChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return RetrieveCouponResultSet(response, path)
temboo/Library/Stripe/Coupons/RetrieveCoupon.py
3,185
An InputSet with methods appropriate for specifying the inputs to the RetrieveCoupon Choreo. The InputSet object is used to specify input parameters when executing this Choreo. A ResultSet with methods tailored to the values returned by the RetrieveCoupon Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe) Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe) Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve) -*- coding: utf-8 -*- RetrieveCoupon Retrieves a coupon with specified coupon id. Python versions 2.6, 2.7, 3.x Copyright 2014, Temboo Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
1,480
en
0.729086
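A hedged invocation sketch for the RetrieveCoupon Choreo above. The session credentials, Stripe API key, and coupon ID are placeholders, and the sketch assumes the standard Temboo Python SDK pattern of constructing a TembooSession and calling execute_with_results.

from temboo.core.session import TembooSession
from temboo.Library.Stripe.Coupons.RetrieveCoupon import RetrieveCoupon

# Placeholder credentials -- substitute a real Temboo account and app key.
session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")

choreo = RetrieveCoupon(session)
inputs = choreo.new_input_set()
inputs.set_APIKey("sk_test_placeholder")   # hypothetical Stripe API key
inputs.set_CouponID("25OFF")               # hypothetical coupon id

results = choreo.execute_with_results(inputs)
print(results.get_Response())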
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import tensorflow as tf


def build_kd_loss(teacher_logits,
                  student_logits,
                  task_balance=0.3,
                  distill_tempreture=2.0,
                  labels=None,
                  loss_type='mse'):
    if loss_type == 'mse':
        # mean square error
        return mse_loss(teacher_logits, student_logits)
    elif loss_type == 'xent':
        # cross entropy
        return xent_loss(teacher_logits, student_logits, labels,
                         distill_tempreture, task_balance)
    else:
        # kl divergence
        return kld_loss(teacher_logits, student_logits, labels,
                        distill_tempreture, task_balance)


def mse_loss(teacher_logits, student_logits):
    loss = tf.reduce_mean(tf.nn.l2_loss(teacher_logits - student_logits))
    return loss


def xent_loss(teacher_logits, student_logits, labels, distill_tempreture, task_balance):
    # Hard-label loss: the student against the ground-truth labels.
    student_task_xent = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(labels),
                                                       logits=student_logits))
    # Soft-target loss: the student against the temperature-smoothed teacher.
    teacher_targets = tf.nn.softmax(teacher_logits / distill_tempreture)
    student_distill_xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.stop_gradient(teacher_targets), logits=student_logits))
    losses = task_balance * student_task_xent
    losses += (1 - task_balance) * student_distill_xent
    return losses


def kld_loss(teacher_logits, student_logits, labels, distill_temperature, task_balance):
    student_task_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(labels), logits=student_logits)
    # KL(student || teacher) = sum p_s * (log p_s - log p_t);
    # the epsilon guards against log(0).
    student_probs = tf.nn.softmax(student_logits / distill_temperature)
    teacher_probs = tf.nn.softmax(teacher_logits / distill_temperature)
    student_distill = tf.reduce_sum(
        student_probs * (tf.log(student_probs + 1e-5) - tf.log(teacher_probs + 1e-5)))
    losses = task_balance * tf.reduce_mean(student_task_xent)
    losses += (1 - task_balance) * tf.reduce_mean(student_distill)
    return losses


def build_kd_probes_loss(teacher_logits,
                         student_logits,
                         task_balance=0.3,
                         distill_tempreture=2.0,
                         labels=None,
                         loss_type='mse'):
    # Pair each student probe layer with a proportionally-placed teacher layer
    # and accumulate the distillation loss over the pairs.
    teacher_n_layers = len(teacher_logits) - 1
    student_n_layers = len(student_logits) - 1
    probes_kd_loss = 0.0
    for i in range(student_n_layers):
        proportional_layer_idx = int(math.ceil(i * teacher_n_layers / student_n_layers))
        student_layer_logits = student_logits[i]
        teacher_layer_logits = teacher_logits[proportional_layer_idx]
        probes_kd_loss += build_kd_loss(teacher_logits=teacher_layer_logits,
                                        student_logits=student_layer_logits,
                                        task_balance=task_balance,
                                        distill_tempreture=distill_tempreture,
                                        labels=labels,
                                        loss_type=loss_type)
    return probes_kd_loss
easytransfer/losses/kd_loss.py
3,746
coding=utf-8 Copyright (c) 2019 Alibaba PAI team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mean square error cross entropy kl divergence
617
en
0.852806
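A minimal usage sketch for build_kd_loss above, written in TF1-style graph mode to match the file. The placeholder shapes and hyperparameters are illustrative assumptions; note that the keyword argument keeps the module's own 'distill_tempreture' spelling.

import tensorflow as tf
from easytransfer.losses.kd_loss import build_kd_loss

# Assumed shapes: a batch of examples over 10 classes.
teacher_logits = tf.placeholder(tf.float32, [None, 10])
student_logits = tf.placeholder(tf.float32, [None, 10])
labels = tf.placeholder(tf.int64, [None, 1])

# 'xent' blends the hard-label loss with the soft-target loss
# according to task_balance.
loss = build_kd_loss(teacher_logits, student_logits,
                     task_balance=0.3,
                     distill_tempreture=2.0,
                     labels=labels,
                     loss_type='xent')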
# -*- coding: utf-8 -*- """Look command.""" # Part of Clockwork MUD Server (https://github.com/whutch/cwmud) # :copyright: (c) 2008 - 2017 Will Hutcheson # :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt) from .. import Command, COMMANDS from ...characters import CharacterShell @COMMANDS.register class LookCommand(Command): """A command to allow a character to look at things.""" def _action(self): char = self.session.char if not char: self.session.send("You're not playing a character!") return if not char.room: self.session.send("You're not in a room!") return char.show_room() CharacterShell.add_verbs(LookCommand, "look", "l")
cwmud/core/commands/info/look.py
753
A command to allow a character to look at things. Look command. -*- coding: utf-8 -*- Part of Clockwork MUD Server (https://github.com/whutch/cwmud) :copyright: (c) 2008 - 2017 Will Hutcheson :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
265
en
0.756116
# coding: utf-8 # # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. # import pprint import re # noqa: F401 import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class Status(Enum): """ Status of a resource. Allowed enum values: [FAILED, IN_PROGRESS, SUCCEEDED] """ FAILED = "FAILED" IN_PROGRESS = "IN_PROGRESS" SUCCEEDED = "SUCCEEDED" def to_dict(self): # type: () -> Dict[str, Any] """Returns the model properties as a dict""" result = {self.name: self.value} return result def to_str(self): # type: () -> str """Returns the string representation of the model""" return pprint.pformat(self.value) def __repr__(self): # type: () -> str """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): # type: (Any) -> bool """Returns true if both objects are equal""" if not isinstance(other, Status): return False return self.__dict__ == other.__dict__ def __ne__(self, other): # type: (Any) -> bool """Returns true if both objects are not equal""" return not self == other
ask-smapi-model/ask_smapi_model/v1/skill/status.py
1,814
Status of a resource. Allowed enum values: [FAILED, IN_PROGRESS, SUCCEEDED] Returns true if both objects are equal Returns true if both objects are not equal For `print` and `pprint` Returns the model properties as a dict Returns the string representation of the model coding: utf-8 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. noqa: F401 type: () -> Dict[str, Any] type: () -> str type: () -> str type: (Any) -> bool type: (Any) -> bool
934
en
0.831765
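A quick demonstration of the generated Status enum above; it only exercises the helpers defined in the file.

from ask_smapi_model.v1.skill.status import Status

s = Status.IN_PROGRESS
print(s.to_dict())               # {'IN_PROGRESS': 'IN_PROGRESS'}
print(s.to_str())                # 'IN_PROGRESS'
print(s == Status.IN_PROGRESS)   # True
print(s != Status.SUCCEEDED)     # True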
""" Modified from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/storage.py """ import torch from torch.utils.data.sampler import BatchSampler from torch.utils.data.sampler import SubsetRandomSampler class RolloutStorage(object): def __init__(self, num_steps, num_processes, obs_shape, action_space, state_size): self.observations = torch.zeros(num_steps + 1, num_processes, *obs_shape) self.states = torch.zeros(num_steps + 1, num_processes, state_size) self.rewards = torch.zeros(num_steps, num_processes, 1) self.value_preds = torch.zeros(num_steps + 1, num_processes, 1) self.returns = torch.zeros(num_steps + 1, num_processes, 1) self.action_log_probs = torch.zeros(num_steps, num_processes, 1) if action_space.__class__.__name__ == 'Discrete': action_shape = 1 else: action_shape = action_space.shape[0] self.actions = torch.zeros(num_steps, num_processes, action_shape) if action_space.__class__.__name__ == 'Discrete': self.actions = self.actions.long() self.masks = torch.ones(num_steps + 1, num_processes, 1) def cuda(self): self.observations = self.observations.cuda() self.states = self.states.cuda() self.rewards = self.rewards.cuda() self.value_preds = self.value_preds.cuda() self.returns = self.returns.cuda() self.action_log_probs = self.action_log_probs.cuda() self.actions = self.actions.cuda() self.masks = self.masks.cuda() def insert(self, step, current_obs, state, action, action_log_prob, value_pred, reward, mask): self.observations[step + 1].copy_(current_obs) self.states[step + 1].copy_(state) self.actions[step].copy_(action) self.action_log_probs[step].copy_(action_log_prob) self.value_preds[step].copy_(value_pred) self.rewards[step].copy_(reward) self.masks[step + 1].copy_(mask) def after_update(self): self.observations[0].copy_(self.observations[-1]) self.states[0].copy_(self.states[-1]) self.masks[0].copy_(self.masks[-1]) def compute_returns(self, next_value, use_gae, gamma, tau): if use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.size(0))): delta = self.rewards[step] + gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + gamma * tau * self.masks[step + 1] * gae self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.size(0))): self.returns[step] = self.returns[step + 1] * \ gamma * self.masks[step + 1] + self.rewards[step] def feed_forward_generator(self, advantages, num_mini_batch): num_steps, num_processes = self.rewards.size()[0:2] batch_size = num_processes * num_steps assert batch_size >= num_mini_batch, "ppo req batch size to be greater than number of mini batches" mini_batch_size = batch_size // num_mini_batch sampler = BatchSampler( SubsetRandomSampler(range(batch_size)), mini_batch_size, drop_last=False) for indices in sampler: indices = torch.LongTensor(indices) if advantages.is_cuda: indices = indices.cuda() observations_batch = self.observations[:-1].view( -1, *self.observations.size()[2:])[indices] states_batch = self.states[:-1].view(-1, self.states.size(-1))[indices] actions_batch = self.actions.view(-1, self.actions.size(-1))[indices] return_batch = self.returns[:-1].view(-1, 1)[indices] masks_batch = self.masks[:-1].view(-1, 1)[indices] old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices] adv_targ = advantages.view(-1, 1)[indices] yield observations_batch, states_batch, actions_batch, \ return_batch, masks_batch, old_action_log_probs_batch, adv_targ def 
recurrent_generator(self, advantages, num_mini_batch):
        num_processes = self.rewards.size(1)
        num_envs_per_batch = num_processes // num_mini_batch
        perm = torch.randperm(num_processes)
        for start_ind in range(0, num_processes, num_envs_per_batch):
            observations_batch = []
            states_batch = []
            actions_batch = []
            return_batch = []
            masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []

            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                observations_batch.append(self.observations[:-1, ind])
                states_batch.append(self.states[:-1, ind])
                actions_batch.append(self.actions[:, ind])
                return_batch.append(self.returns[:-1, ind])
                masks_batch.append(self.masks[:-1, ind])
                old_action_log_probs_batch.append(
                    self.action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])

            observations_batch = torch.cat(observations_batch, 0)
            states_batch = torch.cat(states_batch, 0)
            actions_batch = torch.cat(actions_batch, 0)
            return_batch = torch.cat(return_batch, 0)
            masks_batch = torch.cat(masks_batch, 0)
            old_action_log_probs_batch = torch.cat(old_action_log_probs_batch, 0)
            adv_targ = torch.cat(adv_targ, 0)

            yield observations_batch, states_batch, actions_batch, \
                return_batch, masks_batch, old_action_log_probs_batch, adv_targ
ADMCode/snuz/ppo/storage.py
6,477
Modified from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/storage.py
121
en
0.493009
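A minimal sketch of how RolloutStorage above might be driven, using dummy tensors. The Discrete class below is a stand-in (the file only inspects the class name, so a real gym space is not required), and the dimensions are arbitrary assumptions.

import torch
from ADMCode.snuz.ppo.storage import RolloutStorage

class Discrete:
    """Stand-in for gym.spaces.Discrete; only the class name is inspected."""
    def __init__(self, n):
        self.n = n

num_steps, num_processes = 5, 4
rollouts = RolloutStorage(num_steps, num_processes, obs_shape=(8,),
                          action_space=Discrete(2), state_size=16)

# Fill one dummy transition, then bootstrap returns with GAE.
rollouts.insert(0,
                torch.zeros(num_processes, 8),          # current_obs
                torch.zeros(num_processes, 16),         # state
                torch.zeros(num_processes, 1).long(),   # action
                torch.zeros(num_processes, 1),          # action_log_prob
                torch.zeros(num_processes, 1),          # value_pred
                torch.ones(num_processes, 1),           # reward
                torch.ones(num_processes, 1))           # mask
rollouts.compute_returns(next_value=torch.zeros(num_processes, 1),
                         use_gae=True, gamma=0.99, tau=0.95)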
# -*- coding: utf-8 -*- from __future__ import unicode_literals import collections from django.db import migrations import mptt import mptt.managers def copy_regulations(apps, schema_editor): Regulation = apps.get_model('regcore', 'Regulation') Document = apps.get_model('regcore', 'Document') for reg in Regulation.objects.all(): data = { field.name: getattr(reg, field.name) for field in Regulation._meta.fields if field.name not in {'parent'} } doc = Document(doc_type='cfr', **data) doc.parent_id = reg.parent_id doc.save() def uncopy_regulations(apps, schema_editor): Regulation = apps.get_model('regcore', 'Regulation') Document = apps.get_model('regcore', 'Document') for doc in Document.objects.filter(doc_type='cfr'): data = { field.name: getattr(doc, field.name) for field in Regulation._meta.fields if field.name not in {'parent'} } reg = Regulation(**data) reg.parent_id = doc.parent_id reg.save() def copy_preambles(apps, schema_editor): Preamble = apps.get_model('regcore', 'Preamble') Document = apps.get_model('regcore', 'Document') # Bind manager manager = mptt.managers.TreeManager() manager.model = Document mptt.register(Document) manager.contribute_to_class(Document, 'objects') for pre in Preamble.objects.all(): write_node(Document, pre.data, 'preamble', pre.data['label']) def uncopy_preambles(apps, schema_editor): Preamble = apps.get_model('regcore', 'Preamble') Document = apps.get_model('regcore', 'Document') # Bind manager manager = mptt.managers.TreeManager() manager.model = Document mptt.register(Document) manager.contribute_to_class(Document, 'objects') for doc in Document.objects.filter(doc_type='preamble', root=True): nodes = doc.get_descendants(include_self=True) data = serialize(nodes[0], build_adjacency_map(nodes)) pre = Preamble(document_number=doc.label_string, data=data) pre.save() # Copy lightly modified import helpers def serialize(pre, adjacency_map): return { 'label': pre.label_string.split('-'), 'text': pre.text, 'node_type': pre.node_type, 'children': [ serialize(child, adjacency_map) for child in adjacency_map.get(pre.id, []) ], } def build_adjacency_map(regs): """Build mapping from node IDs to child records :param regs: List of `Regulation` records """ ret = collections.defaultdict(list) for reg in regs: if reg.parent_id is not None: ret[reg.parent_id].append(reg) return ret def write_node(Document, node, doc_type, label_id, version=None): to_save = [] labels_seen = set() def add_node(node, parent=None): label_tuple = tuple(node['label']) labels_seen.add(label_tuple) node['parent'] = parent to_save.append(node) for child in node['children']: add_node(child, parent=node) add_node(node) DMDocuments(Document).bulk_put(to_save, doc_type, label_id, version) def treeify(node, tree_id, pos=1, level=0): """Set tree properties in memory. 
""" node['tree_id'] = tree_id node['level'] = level node['left'] = pos for child in node.get('children', []): pos = treeify(child, tree_id, pos=pos + 1, level=level + 1) pos = pos + 1 node['right'] = pos return pos def build_id(reg, version=None): if version is not None: return '{}:{}'.format(version, '-'.join(reg['label'])) return '-'.join(reg['label']) class DMDocuments(object): def __init__(self, Document): self.Document = Document def _transform(self, reg, doc_type, version=None): """Create the Django object""" return self.Document( id=build_id(reg, version), doc_type=doc_type, version=version, parent_id=( build_id(reg['parent'], version) if reg.get('parent') else None ), tree_id=reg['tree_id'], level=reg['level'], lft=reg['left'], rght=reg['right'], label_string='-'.join(reg['label']), text=reg['text'], title=reg.get('title', ''), node_type=reg['node_type'], root=(len(reg['label']) == 1), ) def bulk_put(self, regs, doc_type, root_label, version): self.Document.objects.filter( version=version, doc_type=doc_type, label_string__startswith=root_label, ).delete() treeify(regs[0], self.Document.objects._get_next_tree_id()) self.Document.objects.bulk_create( [self._transform(r, doc_type, version) for r in regs], batch_size=25) class Migration(migrations.Migration): dependencies = [ ('regcore', '0011_create_document'), ] operations = [ migrations.RunPython(copy_regulations, uncopy_regulations), migrations.RunPython(copy_preambles, uncopy_preambles), ]
regcore/migrations/0012_migrate_documents.py
5,236
Create the Django object Build mapping from node IDs to child records :param regs: List of `Regulation` records Set tree properties in memory. -*- coding: utf-8 -*- Bind manager Bind manager Copy lightly modified import helpers
234
en
0.705053
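A tiny worked example of the treeify helper above, calling the module-level function directly on a synthetic node dict rather than real migration data:

# A root with two leaf children receives standard MPTT nested-set numbering.
node = {'children': [{'children': []}, {'children': []}]}
treeify(node, tree_id=1)

assert (node['left'], node['right']) == (1, 6)
assert (node['children'][0]['left'], node['children'][0]['right']) == (2, 3)
assert (node['children'][1]['left'], node['children'][1]['right']) == (4, 5)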
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetWorkspaceConnectionResult', 'AwaitableGetWorkspaceConnectionResult', 'get_workspace_connection', ] @pulumi.output_type class GetWorkspaceConnectionResult: """ Workspace connection. """ def __init__(__self__, auth_type=None, category=None, id=None, name=None, target=None, type=None, value=None): if auth_type and not isinstance(auth_type, str): raise TypeError("Expected argument 'auth_type' to be a str") pulumi.set(__self__, "auth_type", auth_type) if category and not isinstance(category, str): raise TypeError("Expected argument 'category' to be a str") pulumi.set(__self__, "category", category) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if target and not isinstance(target, str): raise TypeError("Expected argument 'target' to be a str") pulumi.set(__self__, "target", target) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if value and not isinstance(value, str): raise TypeError("Expected argument 'value' to be a str") pulumi.set(__self__, "value", value) @property @pulumi.getter(name="authType") def auth_type(self) -> Optional[str]: """ Authorization type of the workspace connection. """ return pulumi.get(self, "auth_type") @property @pulumi.getter def category(self) -> Optional[str]: """ Category of the workspace connection. """ return pulumi.get(self, "category") @property @pulumi.getter def id(self) -> str: """ ResourceId of the workspace connection. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Friendly name of the workspace connection. """ return pulumi.get(self, "name") @property @pulumi.getter def target(self) -> Optional[str]: """ Target of the workspace connection. """ return pulumi.get(self, "target") @property @pulumi.getter def type(self) -> str: """ Resource type of workspace connection. """ return pulumi.get(self, "type") @property @pulumi.getter def value(self) -> Optional[str]: """ Value details of the workspace connection. """ return pulumi.get(self, "value") class AwaitableGetWorkspaceConnectionResult(GetWorkspaceConnectionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetWorkspaceConnectionResult( auth_type=self.auth_type, category=self.category, id=self.id, name=self.name, target=self.target, type=self.type, value=self.value) def get_workspace_connection(connection_name: Optional[str] = None, resource_group_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceConnectionResult: """ Workspace connection. :param str connection_name: Friendly name of the workspace connection :param str resource_group_name: Name of the resource group in which workspace is located. :param str workspace_name: Name of Azure Machine Learning workspace. 
""" __args__ = dict() __args__['connectionName'] = connection_name __args__['resourceGroupName'] = resource_group_name __args__['workspaceName'] = workspace_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200601:getWorkspaceConnection', __args__, opts=opts, typ=GetWorkspaceConnectionResult).value return AwaitableGetWorkspaceConnectionResult( auth_type=__ret__.auth_type, category=__ret__.category, id=__ret__.id, name=__ret__.name, target=__ret__.target, type=__ret__.type, value=__ret__.value)
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20200601/get_workspace_connection.py
4,906
Workspace connection. Authorization type of the workspace connection. Category of the workspace connection. Workspace connection. :param str connection_name: Friendly name of the workspace connection :param str resource_group_name: Name of the resource group in which workspace is located. :param str workspace_name: Name of Azure Machine Learning workspace. ResourceId of the workspace connection. Friendly name of the workspace connection. Target of the workspace connection. Resource type of workspace connection. Value details of the workspace connection. coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test
761
en
0.861981
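A hedged usage sketch for get_workspace_connection above. The resource names are placeholders, and the invoke only resolves inside a running Pulumi program:

import pulumi

# Placeholder names for an existing Azure ML workspace connection.
conn = get_workspace_connection(connection_name="my-connection",
                                resource_group_name="my-resource-group",
                                workspace_name="my-workspace")

pulumi.export("connectionTarget", conn.target)
pulumi.export("connectionAuthType", conn.auth_type)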
import asyncio
import json

import aiohttp
from bs4 import BeautifulSoup

VC_SEARCH = "https://vc.ru/search/v2/content/new"


async def parse_urls(key_word):
    async with aiohttp.ClientSession() as session:
        async with session.get(VC_SEARCH, params={
            "query": key_word,
            "target_type": 'posts',
        }) as r:
            soup = BeautifulSoup(await r.text(), 'html.parser')
            urls = [x["href"] for x in soup.find_all("a", {"class": "content-feed__link"})]
            return urls


async def get_text(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as r:
            soup = BeautifulSoup(await r.text(), 'html.parser')
            text = " ".join(map(lambda x: x.text,
                                soup.find("div", {"class": "l-entry__content"}).find_all("p")))
            return text


async def get_all_texts(keyword):
    urls = await parse_urls(keyword)
    all_texts = []
    for u in urls[:25]:
        text = await get_text(u)
        all_texts.append(text)
    return all_texts


async def vc_get_data(keyword, result_file_path='result-vc.json'):
    texts = await get_all_texts(keyword)
    result_dict = {"company": keyword, "texts": texts}
    result_json = json.loads(json.dumps(result_dict))
    return result_json
    #with open(result_file_path, 'w', encoding='utf-8') as f:
    #    json.dump(result_json, f, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(vc_get_data("сбер", "other/sber-vc.json"))
Parsers/vcru.py
1,580
with open(result_file_path, 'w', encoding='utf-8') as f: json.dump(result_json, f, ensure_ascii=False, indent=4)
112
en
0.718409
#!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function

__copyright__ = (u"Copyright (c), 2016, Forschungszentrum Jülich GmbH, "
                 "IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.27"
__contributors__ = "Jens Broeder"

from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
    load_dbenv()

import sys

from aiida.common.example_helpers import test_and_get_code
from aiida.plugins import DataFactory
from aiida_fleur.workflows.scf import FleurScfWorkChain

# If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not
# invoke the mpirun command in the submission script)
run_in_serial_mode = True  # set to False to run via mpirun
queue = None
################################################################

ParameterData = DataFactory('parameter')
Dict = DataFactory('dict')  # used below for the workchain parameters
FleurinpData = DataFactory('fleur.fleurinp')

try:
    dontsend = sys.argv[1]
    if dontsend == "--dont-send":
        submit_test = True
    elif dontsend == "--send":
        submit_test = False
    else:
        raise IndexError
except IndexError:
    print(("The first parameter can only be either "
           "--send or --dont-send"), file=sys.stderr)
    sys.exit(1)

try:
    codename = sys.argv[2]
except IndexError:
    codename = None

try:
    queue = sys.argv[3]
except IndexError:
    queue = None

#####

code = test_and_get_code(codename, expected_code_type='fleur.fleur')

# get where tests folder is, then relative path
inpxmlfile = '/usr/users/iff_th1/broeder/aiida/github/aiida-fleur/tests/inp_xml_files/W/inp.xml'
fleurinp = FleurinpData(files=[inpxmlfile])

wf_para = Dict(dict={'fleur_runmax': 4,
                     'density_criterion': 0.000001,
                     'queue_name': 'th123_node',
                     'resources': {"num_machines": 1, "num_mpiprocs_per_machine": 12},
                     'walltime_sec': 10 * 60,
                     'serial': run_in_serial_mode})

if submit_test:
    print('workchains do not have a submit_test function so far')
else:
    print("Running fleur_scf_wc")
    res = FleurScfWorkChain.run(wf_parameters=wf_para, fleurinp=fleurinp, fleur=code)
examples/old_workflowtests/test_run_scf2.py
2,331
!/usr/bin/env runaiida -*- coding: utf-8 -*- If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not invoke the mpirun command in the submission script) set to False to run via mpirun used below for the workchain parameters get where tests folder is, then relative path
256
en
0.644342
""" Copyright (c) 2008-2020, Jesus Cea Avion <jcea@jcea.es> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Jesus Cea Avion nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import unittest import os, glob from .test_all import db, test_support, get_new_environment_path, \ get_new_database_path #---------------------------------------------------------------------- class DB(unittest.TestCase): def setUp(self): self.path = get_new_database_path() self.db = db.DB() def tearDown(self): self.db.close() del self.db test_support.unlink(self.path) class DB_general(DB) : def test_get_open_flags(self) : self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE) self.assertEqual(db.DB_CREATE, self.db.get_open_flags()) def test_get_open_flags2(self) : self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE | db.DB_THREAD) self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags()) def test_get_dbname_filename(self) : self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE) self.assertEqual((self.path, None), self.db.get_dbname()) def test_get_dbname_filename_database(self) : name = "jcea-random-name" self.db.open(self.path, dbname=name, dbtype=db.DB_HASH, flags = db.DB_CREATE) self.assertEqual((self.path, name), self.db.get_dbname()) def test_bt_minkey(self) : for i in [17, 108, 1030] : self.db.set_bt_minkey(i) self.assertEqual(i, self.db.get_bt_minkey()) def test_lorder(self) : self.db.set_lorder(1234) self.assertEqual(1234, self.db.get_lorder()) self.db.set_lorder(4321) self.assertEqual(4321, self.db.get_lorder()) self.assertRaises(db.DBInvalidArgError, self.db.set_lorder, 9182) def test_priority(self) : flags = [db.DB_PRIORITY_VERY_LOW, db.DB_PRIORITY_LOW, db.DB_PRIORITY_DEFAULT, db.DB_PRIORITY_HIGH, db.DB_PRIORITY_VERY_HIGH] for flag in flags : self.db.set_priority(flag) self.assertEqual(flag, self.db.get_priority()) def test_get_transactional(self) : self.assertFalse(self.db.get_transactional()) self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE) self.assertFalse(self.db.get_transactional()) class DB_hash(DB) : def test_h_ffactor(self) : for ffactor in [4, 16, 256] : self.db.set_h_ffactor(ffactor) self.assertEqual(ffactor, 
self.db.get_h_ffactor()) def test_h_nelem(self) : for nelem in [1, 2, 4] : nelem = nelem*1024*1024 # Millions self.db.set_h_nelem(nelem) self.assertEqual(nelem, self.db.get_h_nelem()) def test_pagesize(self) : for i in range(9, 17) : # From 512 to 65536 i = 1<<i self.db.set_pagesize(i) self.assertEqual(i, self.db.get_pagesize()) # The valid values goes from 512 to 65536 # Test 131072 bytes... self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17) # Test 256 bytes... self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8) class DB_txn(DB) : def setUp(self) : self.homeDir = get_new_environment_path() self.env = db.DBEnv() self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG | db.DB_INIT_TXN) self.db = db.DB(self.env) def tearDown(self) : self.db.close() del self.db self.env.close() del self.env test_support.rmtree(self.homeDir) def test_flags(self) : self.db.set_flags(db.DB_CHKSUM) self.assertEqual(db.DB_CHKSUM, self.db.get_flags()) self.db.set_flags(db.DB_TXN_NOT_DURABLE) self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM, self.db.get_flags()) def test_get_transactional(self) : self.assertFalse(self.db.get_transactional()) # DB_AUTO_COMMIT = Implicit transaction self.db.open("XXX", dbtype=db.DB_HASH, flags = db.DB_CREATE | db.DB_AUTO_COMMIT) self.assertTrue(self.db.get_transactional()) class DB_recno(DB) : def test_re_pad(self) : for i in [' ', '*'] : # Check chars self.db.set_re_pad(i) self.assertEqual(ord(i), self.db.get_re_pad()) for i in [97, 65] : # Check integers self.db.set_re_pad(i) self.assertEqual(i, self.db.get_re_pad()) def test_re_delim(self) : for i in [' ', '*'] : # Check chars self.db.set_re_delim(i) self.assertEqual(ord(i), self.db.get_re_delim()) for i in [97, 65] : # Check integers self.db.set_re_delim(i) self.assertEqual(i, self.db.get_re_delim()) def test_re_source(self) : for i in ["test", "test2", "test3"] : self.db.set_re_source(i) self.assertEqual(i, self.db.get_re_source()) class DB_queue(DB) : def test_re_len(self) : for i in [33, 65, 300, 2000] : self.db.set_re_len(i) self.assertEqual(i, self.db.get_re_len()) def test_q_extentsize(self) : for i in [1, 60, 100] : self.db.set_q_extentsize(i) self.assertEqual(i, self.db.get_q_extentsize()) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DB_general)) suite.addTest(unittest.makeSuite(DB_txn)) suite.addTest(unittest.makeSuite(DB_hash)) suite.addTest(unittest.makeSuite(DB_recno)) suite.addTest(unittest.makeSuite(DB_queue)) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
Lib3/bsddb/test/test_db.py
7,322
Copyright (c) 2008-2020, Jesus Cea Avion <jcea@jcea.es> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Jesus Cea Avion nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------- Millions From 512 to 65536 The valid values goes from 512 to 65536 Test 131072 bytes... Test 256 bytes... DB_AUTO_COMMIT = Implicit transaction Check chars Check integers Check chars Check integers
1,879
en
0.819945
from dataclasses import dataclass, field from datetime import datetime # for typehinting from typing import TYPE_CHECKING, Generator, List, Literal, Optional import aiohttp import dateparser from .exceptions import UnsupportedRegionError from .pricing import PriceQuery, query_price COUNT = 30 # Items per page of paginated response if TYPE_CHECKING: from .regions import Region # pragma: no cover @dataclass class RatingContent: id: int = None name: str = None type: Literal["descriptor", "interactive"] = None image_url: Optional[str] = None # JP Field svg_image_url: Optional[str] = None # JP Field def __init__(self, data) -> None: self.id = data['id'] self.name = data['name'] self.type = data['type'] if data.get('image_url'): self.image_url = data['image_url'] if data.get('svg_image_url'): self.svg_image_url = data['svg_image_url'] @dataclass class Rating: age: int = None id: int = None image_url: Optional[str] = None name: str = None provisional: bool = None svg_image_url: str = None def __init__(self, data) -> None: if (data['id']) == 0: return self.age = data['age'] self.id = data['id'] if data.get('image_url'): self.image_url = data['image_url'] self.provisional = data['provisional'] self.svg_image_url = data['svg_image_url'] @dataclass class RatingSystem: id: int = None name: str = None def __init__(self, data) -> None: self.id = data['id'] self.name = data['name'] @dataclass class Game: region: "Region" = None content_type: str = None # Literal["game", "bundle"] ??? expand and replace hint dominant_colors: List[str] = None formal_name: str = None hero_banner_url: str = None id: int = None is_new: bool = None membership_required: bool = None public_status: Literal["public"] = None rating_content: List[RatingContent] = field(default_factory=list) rating: Rating = None rating_system: RatingSystem = None release_date_on_eshop: datetime = None screenshots: List[str] = field(default_factory=list) strong_disclaimer: str = None tags: List = field(default_factory=list) target_titles: List = field(default_factory=list) def __init__(self, data, region) -> None: self.region = region self.content_type = data['content_type'] self.dominant_colors = data['dominant_colors'] self.formal_name = data['formal_name'] self.hero_banner_url = data['hero_banner_url'] self.id = data['id'] self.is_new = data['is_new'] self.membership_required = data['membership_required'] self.public_status = data['public_status'] self.rating_content = [RatingContent(c) for c in data['rating_info']['content_descriptors']] self.rating = Rating(data['rating_info']['rating']) self.rating_system = RatingSystem(data['rating_info']['rating_system']) # TODO: is this dateparser correct? 
self.release_date_on_eshop = dateparser.parse(data['release_date_on_eshop'], settings={'TIMEZONE': "UTC"}) self.screenshots = [s['images'][0]['url'] for s in data['screenshots']] self.strong_disclaimer = data.get('strong_disclaimer', None) self.tags = data['tags'] self.target_titles = data['target_titles'] async def query_price(self) -> PriceQuery: return await query_price(self.region, self) async def query_listing(region: "Region", type: Literal["sales", "new", "ranking"]) -> Generator[Game, None, None]: if not region.supports_listing: raise UnsupportedRegionError("Region does not support listings") if type not in ["sales", "new", "ranking"]: raise ValueError("Invalid type: " + type) lang, reg = region.culture_code.split('_') offset = 0 async with aiohttp.ClientSession() as session: while True: url = f'https://ec.nintendo.com/api/{reg}/{lang}/search/{type}?offset={offset}&count={COUNT}' async with session.get(url) as request: request.raise_for_status() data = await request.json() for game in data['contents']: yield Game(game, region) if (offset + COUNT) >= data['total']: break offset += COUNT
nsecpy/listing.py
4,463
for typehinting Items per page of paginated response pragma: no cover JP Field JP Field Literal["game", "bundle"] ??? expand and replace hint TODO: is this dateparser correct?
175
en
0.55199
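A brief consumption sketch for the async query_listing generator above. Constructing a Region is out of scope for this file, so the region argument is left as an assumption:

import asyncio

from nsecpy.listing import query_listing

async def print_new_titles(region, limit=5):
    # Stream the paginated "new" listing and stop after a few titles.
    seen = 0
    async for game in query_listing(region, "new"):
        print(game.formal_name)
        seen += 1
        if seen >= limit:
            break

# Assuming `region` comes from nsecpy's regions module:
# asyncio.run(print_new_titles(region))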
"""
Provides the `TaggedProfiler` class related to record profiling.

TODO: Better description needed.
"""
from collections import defaultdict
from typing import Dict, Tuple, Iterator, Callable, Any, Optional
from dataclasses import dataclass


@dataclass
class TaggedProfilerRecordStatus:
    offset: int
    tag: str
    key: str
    val: Any
    r: Optional[dict]


@dataclass
class TaggedProfilerSummary:
    total: int
    histo: dict
    index: Optional[dict]
    cache: Optional[dict]

    def describe(self) -> Iterator[str]:
        yield f"histo = {self.histo}"
        if self.index is None:
            yield f"index = {self.index}"
        else:
            yield f"index with {len(self.index)} items:"
            for label, nums in self.index.items():
                yield f"label = '{label}', size = {len(nums)}:"
                if self.cache is not None:
                    for n in nums:
                        yield f"cache[{n}] = {self.cache[n]}"


class TaggedProfiler:
    """A useful tag-based profiler class which we'll describe when we have more time."""

    def __init__(self, tagmap: Dict[str, Callable]):
        self.tagmap = tagmap

    def eval_dict(self, r: dict) -> Iterator[Tuple[str, str, str]]:
        for (tag, f) in self.tagmap.items():
            for (k, v) in r.items():
                if f(v):
                    yield (tag, k, v)

    def evaluate(self, recs: Iterator[dict], deep: bool = False) -> Iterator[TaggedProfilerRecordStatus]:
        for (i, r) in enumerate(recs):
            for (tag, k, v) in self.eval_dict(r):
                yield TaggedProfilerRecordStatus(i, tag, k, v, r if deep else None)

    def profile(self, recs: Iterator[dict], index: bool = False, deep: bool = False) -> TaggedProfilerSummary:
        """Provides the most useful summary counts you'll likely want from the
        incoming record sequence.  Optional :index and :deep flags allow us to
        return special indexing and caching structs which we'll describe later."""
        # We use underscores for all "recording" structures.
        # Non-underscore names for input variables and flags.
        labels = list(self.tagmap.keys())
        temp_cache: Dict[int, Any] = {}
        temp_index: Dict[str, Any] = {k: defaultdict(int) for k in labels}
        for status in self.evaluate(recs, deep):
            temp_cache[status.offset] = status.r if deep else 1
            temp_index[status.tag][status.offset] += 1
        _total = len(temp_cache)
        _histo: Dict[str, int] = {k: len(v) for (k, v) in temp_index.items()}
        _index: Optional[Dict[str, list]] = None
        _cache: Optional[Dict[int, Any]] = None
        if temp_index:
            _index = {k: list(v.keys()) for k, v in temp_index.items()}
        if deep:
            _cache = temp_cache
        return TaggedProfilerSummary(_total, _histo, _index, _cache)
caixa/profile/tagged.py
2,865
A useful tag-based profiler class which we'll describe when we have more time. Provides the most useful summary counts you'll likely want from the incoming record sequence. Optional :index and :deep flags allow us to return special indexing and caching structs which we'll describe later. We use underscores for all "recording" structures. Non-underscore names for input variables and flags.
394
en
0.877636
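An illustrative use of TaggedProfiler above; the tag predicates and the records are made up for the example.

from caixa.profile.tagged import TaggedProfiler

profiler = TaggedProfiler({
    'missing': lambda v: v is None,
    'negative': lambda v: isinstance(v, (int, float)) and v < 0,
})
records = [{'a': None, 'b': 3}, {'a': 1, 'b': -2}, {'a': 1, 'b': 2}]

summary = profiler.profile(records, deep=True)
print(summary.total)   # 2 -- records that matched at least one tag
print(summary.histo)   # {'missing': 1, 'negative': 1}
for line in summary.describe():
    print(line)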
# Mapping/MutableMapping live in collections.abc on Python 3 (they were
# removed from the collections namespace in Python 3.10).
from collections.abc import Mapping, MutableMapping


class CaseInsensitiveDict(MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
anillo/utils/structures.py
2,399
A case-insensitive ``dict``-like object. Implements all methods and operations of ``collections.MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. Like iteritems(), but with all lowercase keys. Use the lowercased key for lookups, but store the actual key alongside the value. Compare insensitively Copy is required
1,087
en
0.806926
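A short demonstration of the case-insensitive behaviour documented above; the header values are arbitrary.

from anillo.utils.structures import CaseInsensitiveDict

cid = CaseInsensitiveDict({'Content-Type': 'application/json'})
print(cid['content-type'])         # 'application/json'
print(list(cid))                   # ['Content-Type'] -- original casing kept
print(dict(cid.lower_items()))     # {'content-type': 'application/json'}
print(cid == {'CONTENT-TYPE': 'application/json'})   # True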
# -*- coding: utf-8 -*- # This is a simple mailbox polling script for the Sahana Messaging Module # If there is a need to collect from non-compliant mailers then suggest using the robust Fetchmail to collect & store in a more compliant mailer! # This script doesn't handle MIME attachments import sys, socket, email, uuid # Read-in configuration from Database settings = db(db.msg_email_settings.id == 1).select(limitby=(0, 1)).first() host = settings.inbound_mail_server server_type = settings.inbound_mail_type ssl = settings.inbound_mail_ssl port = settings.inbound_mail_port username = settings.inbound_mail_username password = settings.inbound_mail_password delete = settings.inbound_mail_delete if server_type == "pop3": import poplib # http://docs.python.org/library/poplib.html try: if ssl: p = poplib.POP3_SSL(host, port) else: p = poplib.POP3(host, port) except socket.error, e: error = "Cannot connect: %s" % e print error # Store status in the DB try: id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id db(db.msg_email_inbound_status.id == id).update(status=error) except: db.msg_email_inbound_status.insert(status=error) # Explicitly commit DB operations when running from Cron db.commit() sys.exit(1) try: # Attempting APOP authentication... p.apop(username, password) except poplib.error_proto: # Attempting standard authentication... try: p.user(username) p.pass_(password) except poplib.error_proto, e: print "Login failed:", e # Store status in the DB try: id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id db(db.msg_email_inbound_status.id == id).update(status="Login failed: %s" % e) except: db.msg_email_inbound_status.insert(status="Login failed: %s" % e) # Explicitly commit DB operations when running from Cron db.commit() sys.exit(1) dellist = [] mblist = p.list()[1] for item in mblist: number, octets = item.split(" ") # Retrieve the message (storing it in a list of lines) lines = p.retr(number)[1] # Create an e-mail object representing the message msg = email.message_from_string("\n".join(lines)) # Parse out the 'From' Header sender = msg["from"] # Parse out the 'Subject' Header if "subject" in msg: subject = msg["subject"] else: subject = "" # Parse out the 'Body' textParts = msg.get_payload() body = textParts[0].get_payload() # Store in DB uuidstamp = uuid.uuid4() db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body) if delete: # Add it to the list of messages to delete later dellist.append(number) # Explicitly commit DB operations when running from Cron db.commit() # Iterate over the list of messages to delete for number in dellist: p.dele(number) p.quit() elif server_type == "imap": import imaplib # http://docs.python.org/library/imaplib.html try: if ssl: M = imaplib.IMAP4_SSL(host, port) else: M = imaplib.IMAP4(host, port) except socket.error, e: error = "Cannot connect: %s" % e print error # Store status in the DB try: id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id db(db.msg_email_inbound_status.id == id).update(status=error) except: db.msg_email_inbound_status.insert(status=error) # Explicitly commit DB operations when running from Cron db.commit() sys.exit(1) try: M.login(username, password) except M.error, e: error = "Login failed: %s" % e print error # Store status in the DB try: id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id db(db.msg_email_inbound_status.id == id).update(status=error) except: db.msg_email_inbound_status.insert(status=error) # 
Explicitly commit DB operations when running from Cron db.commit() sys.exit(1) dellist = [] # Select inbox M.select() # Search for Messages to Download typ, data = M.search(None, "ALL") for num in data[0].split(): typ, msg_data = M.fetch(num, "(RFC822)") for response_part in msg_data: if isinstance(response_part, tuple): msg = email.message_from_string(response_part[1]) # Parse out the 'From' Header sender = msg["from"] # Parse out the 'Subject' Header if "subject" in msg: subject = msg["subject"] else: subject = "" # Parse out the 'Body' textParts = msg.get_payload() body = textParts[0].get_payload() # Store in DB uuidstamp = uuid.uuid4() db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body) if delete: # Add it to the list of messages to delete later dellist.append(num) # Explicitly commit DB operations when running from Cron db.commit() # Iterate over the list of messages to delete for number in dellist: typ, response = M.store(number, "+FLAGS", r"(\Deleted)") M.close() M.logout()
cron/email_receive.py
5,826
-*- coding: utf-8 -*- This is a simple mailbox polling script for the Sahana Messaging Module If there is a need to collect from non-compliant mailers then suggest using the robust Fetchmail to collect & store in a more compliant mailer! This script doesn't handle MIME attachments Read-in configuration from Database http://docs.python.org/library/poplib.html Store status in the DB Explicitly commit DB operations when running from Cron Attempting APOP authentication... Attempting standard authentication... Store status in the DB Explicitly commit DB operations when running from Cron Retrieve the message (storing it in a list of lines) Create an e-mail object representing the message Parse out the 'From' Header Parse out the 'Subject' Header Parse out the 'Body' Store in DB Add it to the list of messages to delete later Explicitly commit DB operations when running from Cron Iterate over the list of messages to delete http://docs.python.org/library/imaplib.html Store status in the DB Explicitly commit DB operations when running from Cron Store status in the DB Explicitly commit DB operations when running from Cron Select inbox Search for Messages to Download Parse out the 'From' Header Parse out the 'Subject' Header Parse out the 'Body' Store in DB Add it to the list of messages to delete later Explicitly commit DB operations when running from Cron Iterate over the list of messages to delete
1,411
en
0.762722
""" Module to wrap an integer in bitwise flag/field accessors. """ from collections import OrderedDict from pcapng.ngsix import namedtuple, Iterable class FlagBase(object): """\ Base class for flag types to be used in a Flags object. Handles the bitwise math so subclasses don't have to worry about it. """ __slots__ = [ 'owner', 'offset', 'size', 'extra', 'mask', ] def __init__(self, owner, offset, size, extra=None): if size < 1: raise TypeError('Flag must be at least 1 bit wide') if size > owner._nbits: raise TypeError('Flag must fit into owner size') self.owner = owner self.offset = offset self.size = size self.extra = extra self.mask = ((1 << self.size)-1) << self.offset def get_bits(self): return (self.owner._value & self.mask) >> self.offset def set_bits(self, val): val &= (1 << self.size) - 1 self.owner._value &= ~self.mask self.owner._value |= (val << self.offset) class FlagBool(FlagBase): """Object representing a single boolean flag""" def __init__(self, owner, offset, size, extra=None): if size != 1: raise TypeError('{cls} can only be 1 bit in size'.format(cls=self.__class__.__name__)) super(FlagBool, self).__init__(owner, offset, size) def get(self): return bool(self.get_bits()) def set(self, val): self.set_bits(int(bool(val))) class FlagUInt(FlagBase): """\ Object representing an unsigned integer of the given size stored in a larger bitfield """ def get(self): return self.get_bits() def set(self, val): self.set_bits(val) class FlagEnum(FlagBase): """\ Object representing a range of values stored in part of a larger bitfield """ def __init__(self, owner, offset, size, extra=None): if not isinstance(extra, Iterable): raise TypeError('{cls} needs an iterable of values'.format(cls=self.__class__.__name__)) extra = list(extra) if len(extra) > 2**size: raise TypeError('{cls} iterable has too many values (got {got}, {size} bits only address {max})'.format(cls=self.__class__.__name__, got=len(extra), size=size, max=2**size)) super(FlagEnum, self).__init__(owner, offset, size, extra) def get(self): val = self.get_bits() try: return self.extra[val] except IndexError: return '[invalid value]' def set(self, val): if val in self.extra: self.set_bits(self.extra.index(val)) elif isinstance(val, int): self.set_bits(val) else: raise TypeError('Invalid value {val} for {cls}'.format(val=val, cls=self.__class__.__name__)) # Class representing a single flag schema for FlagWord. # 'nbits' defaults to 1, and 'extra' defaults to None. FlagField = namedtuple('FlagField', ('name', 'ftype', 'nbits', 'extra'), defaults=(1, None)) class FlagWord(object): """\ Class to wrap an integer in bitwise flag/field accessors. 
""" __slots__ = [ '_nbits', '_value', '_schema', ] def __init__(self, schema, nbits=32, initial=0): """ :param schema: A list of FlagField objects representing the values to be packed into this object, in order from LSB to MSB of the underlying int :param nbits: An integer representing the total number of bits used for flags :param initial: The initial integer value of the flags field """ self._nbits = nbits self._value = initial self._schema = OrderedDict() tot_bits = sum([item.nbits for item in schema]) if tot_bits > nbits: raise TypeError("Too many fields for {nbits}-bit field (schema defines {tot} bits)".format(nbits=nbits, tot=tot_bits)) bitn = 0 for item in schema: if not isinstance(item, FlagField): raise TypeError('Schema must be composed of FlagField objects') if not issubclass(item.ftype, FlagBase): raise TypeError('Expected FlagBase, got {}'.format(item.ftype)) self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra) bitn += item.nbits def __int__(self): return self._value def __repr__(self): rv = '<{0} (value={1})'.format(self.__class__.__name__, self._value) for k, v in self._schema.items(): rv += ' {0}={1}'.format(k, v.get()) return rv+'>' def __getattr__(self, name): try: v = self._schema[name] except KeyError: raise AttributeError(name) return v.get() def __setattr__(self, name, val): try: return object.__setattr__(self, name, val) except AttributeError: pass try: v = self._schema[name] except KeyError: raise AttributeError(name) return v.set(val) if __name__ == '__main__': f = FlagWord([ FlagField('inout', FlagEnum, 2, ('NA', 'inbound', 'outbound')), FlagField('casttype', FlagEnum, 3, ('NA', 'unicast', 'multicast', 'broadcast', 'promiscuous')), FlagField('fcslen', FlagUInt, 4), FlagField('reserved', FlagUInt, 7), FlagField('err_16', FlagBool), FlagField('err_17', FlagBool), FlagField('err_18', FlagBool), FlagField('err_19', FlagBool), FlagField('err_20', FlagBool), FlagField('err_21', FlagBool), FlagField('err_22', FlagBool), FlagField('err_23', FlagBool), FlagField('err_crc', FlagBool), FlagField('err_long', FlagBool), FlagField('err_short', FlagBool), FlagField('err_frame_gap', FlagBool), FlagField('err_frame_align', FlagBool), FlagField('err_frame_delim', FlagBool), FlagField('err_preamble', FlagBool), FlagField('err_symbol', FlagBool), ]) f.fcslen = 12 print(f) print(int(f))
pcapng/flags.py
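A minimal usage sketch for the accessors above, assuming the pcapng package is importable; the field names mirror the module's own __main__ demo and the initial value is an arbitrary illustration:

from pcapng.flags import FlagWord, FlagField, FlagEnum, FlagUInt

# initial packs fcslen=9 (bits 5-8), casttype=2 (bits 2-4), inout=1 (bits 0-1)
flags = FlagWord(
    [
        FlagField('inout', FlagEnum, 2, ('NA', 'inbound', 'outbound')),
        FlagField('casttype', FlagEnum, 3,
                  ('NA', 'unicast', 'multicast', 'broadcast', 'promiscuous')),
        FlagField('fcslen', FlagUInt, 4),
    ],
    nbits=32,
    initial=0b1001_010_01,
)

print(flags.inout)     # 'inbound'   -- FlagEnum maps the raw bits to a label
print(flags.casttype)  # 'multicast'
print(flags.fcslen)    # 9

flags.inout = 'outbound'  # enum fields accept labels (or raw integers)
flags.fcslen = 4          # unsigned fields are masked into their bit range
print(int(flags))         # 138: both writes land in the packed integer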
""" Unit tests for nltk.tokenize. See also nltk/test/tokenize.doctest """ import pytest from nltk.tokenize import ( punkt, word_tokenize, TweetTokenizer, StanfordSegmenter, TreebankWordTokenizer, SyllableTokenizer, LegalitySyllableTokenizer, ) def setup_module(module): import pytest try: seg = StanfordSegmenter() seg.default_config("ar") seg.default_config("zh") except LookupError as e: pytest.skip( "Tests for nltk.tokenize.stanford_segmenter skipped: %s" % str(e) ) try: StanfordTokenizer() except LookupError: pytest.skip( "Tests for nltk.tokenize.stanford are skipped because the stanford postagger jar doesn't exist" ) class TestTokenize: def test_tweet_tokenizer(self): """ Test TweetTokenizer using words with special and accented characters. """ tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True) s9 = "@myke: Let's test these words: resumé España München français" tokens = tokenizer.tokenize(s9) expected = [ ':', "Let's", 'test', 'these', 'words', ':', 'resumé', 'España', 'München', 'français', ] assert tokens == expected def test_sonority_sequencing_syllable_tokenizer(self): """ Test SyllableTokenizer tokenizer. """ tokenizer = SyllableTokenizer() tokens = tokenizer.tokenize('justification') assert tokens == ['jus', 'ti', 'fi', 'ca', 'tion'] def test_legality_principle_syllable_tokenizer(self): """ Test LegalitySyllableTokenizer tokenizer. """ from nltk.corpus import words test_word = "wonderful" tokenizer = LegalitySyllableTokenizer(words.words()) tokens = tokenizer.tokenize(test_word) assert tokens == ['won', 'der', 'ful'] def test_stanford_segmenter_arabic(self): """ Test the Stanford Word Segmenter for Arabic (default config) """ try: seg = StanfordSegmenter() seg.default_config('ar') sent = u'يبحث علم الحاسوب استخدام الحوسبة بجميع اشكالها لحل المشكلات' segmented_sent = seg.segment(sent.split()) assert segmented_sent.split() == [ 'يبحث', 'علم', 'الحاسوب', 'استخدام', 'الحوسبة', 'ب', 'جميع', 'اشكال', 'ها', 'ل', 'حل', 'المشكلات', ] except LookupError as e: pytest.skip(str(e)) def test_stanford_segmenter_chinese(self): """ Test the Stanford Word Segmenter for Chinese (default config) """ try: seg = StanfordSegmenter() seg.default_config('zh') sent = u"这是斯坦福中文分词器测试" segmented_sent = seg.segment(sent.split()) assert segmented_sent.split() == ['这', '是', '斯坦福', '中文', '分词器', '测试'] except LookupError as e: pytest.skip(str(e)) def test_phone_tokenizer(self): """ Test a string that resembles a phone number but contains a newline """ # Should be recognized as a phone number, albeit one with multiple spaces tokenizer = TweetTokenizer() test1 = "(393) 928 -3010" expected = ['(393) 928 -3010'] result = tokenizer.tokenize(test1) assert result == expected # Due to newline, first three elements aren't part of a phone number; # fourth is test2 = "(393)\n928 -3010" expected = ['(', '393', ')', "928 -3010"] result = tokenizer.tokenize(test2) assert result == expected def test_pad_asterisk(self): """ Test padding of asterisk for word tokenization. """ text = "This is a, *weird sentence with *asterisks in it." expected = ['This', 'is', 'a', ',', '*', 'weird', 'sentence', 'with', '*', 'asterisks', 'in', 'it', '.'] assert word_tokenize(text) == expected def test_pad_dotdot(self): """ Test padding of dotdot* for word tokenization. """ text = "Why did dotdot.. not get tokenized but dotdotdot... did? How about manydots....." 
expected = ['Why', 'did', 'dotdot', '..', 'not', 'get', 'tokenized', 'but', 'dotdotdot', '...', 'did', '?', 'How', 'about', 'manydots', '.....'] assert word_tokenize(text) == expected def test_remove_handle(self): """ Test remove_handle() from casual.py with specially crafted edge cases """ tokenizer = TweetTokenizer(strip_handles=True) # Simple example. Handles with just numbers should be allowed test1 = "@twitter hello @twi_tter_. hi @12345 @123news" expected = ['hello', '.', 'hi'] result = tokenizer.tokenize(test1) assert result == expected # Handles are allowed to follow any of the following characters test2 = "@n`@n~@n(@n)@n-@n=@n+@n\\@n|@n[@n]@n{@n}@n;@n:@n'@n\"@n/@n?@n.@n,@n<@n>@n @n\n@n ñ@n.ü@n.ç@n." expected = [ '`', '~', '(', ')', '-', '=', '+', '\\', '|', '[', ']', '{', '}', ';', ':', "'", '"', '/', '?', '.', ',', '<', '>', 'ñ', '.', 'ü', '.', 'ç', '.', ] result = tokenizer.tokenize(test2) assert result == expected # Handles are NOT allowed to follow any of the following characters test3 = "a@n j@n z@n A@n L@n Z@n 1@n 4@n 7@n 9@n 0@n _@n !@n @@n #@n $@n %@n &@n *@n" expected = [ 'a', '@n', 'j', '@n', 'z', '@n', 'A', '@n', 'L', '@n', 'Z', '@n', '1', '@n', '4', '@n', '7', '@n', '9', '@n', '0', '@n', '_', '@n', '!', '@n', '@', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n', ] result = tokenizer.tokenize(test3) assert result == expected # Handles are allowed to precede the following characters test4 = "@n!a @n#a @n$a @n%a @n&a @n*a" expected = ['!', 'a', '#', 'a', '$', 'a', '%', 'a', '&', 'a', '*', 'a'] result = tokenizer.tokenize(test4) assert result == expected # Tests interactions with special symbols and multiple @ test5 = "@n!@n @n#@n @n$@n @n%@n @n&@n @n*@n @n@n @@n @n@@n @n_@n @n7@n @nj@n" expected = [ '!', '@n', '#', '@n', '$', '@n', '%', '@n', '&', '@n', '*', '@n', '@n', '@n', '@', '@n', '@n', '@', '@n', '@n_', '@n', '@n7', '@n', '@nj', '@n', ] result = tokenizer.tokenize(test5) assert result == expected # Tests that handles can have a max length of 20 test6 = "@abcdefghijklmnopqrstuvwxyz @abcdefghijklmnopqrst1234 @abcdefghijklmnopqrst_ @abcdefghijklmnopqrstendofhandle" expected = ['uvwxyz', '1234', '_', 'endofhandle'] result = tokenizer.tokenize(test6) assert result == expected # Edge case where an @ comes directly after a long handle test7 = "@abcdefghijklmnopqrstu@abcde @abcdefghijklmnopqrst@abcde @abcdefghijklmnopqrst_@abcde @abcdefghijklmnopqrst5@abcde" expected = [ 'u', '@abcde', '@abcdefghijklmnopqrst', '@abcde', '_', '@abcde', '5', '@abcde', ] result = tokenizer.tokenize(test7) assert result == expected def test_treebank_span_tokenizer(self): """ Test TreebankWordTokenizer.span_tokenize function """ tokenizer = TreebankWordTokenizer() # Test case in the docstring test1 = "Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks)." 
expected = [ (0, 4), (5, 12), (13, 17), (18, 19), (19, 23), (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78), ] result = list(tokenizer.span_tokenize(test1)) assert result == expected # Test case with double quotation test2 = "The DUP is similar to the \"religious right\" in the United States and takes a hardline stance on social issues" expected = [ (0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 85), (86, 92), (93, 95), (96, 102), (103, 109), ] result = list(tokenizer.span_tokenize(test2)) assert result == expected # Test case with double quotation as well as converted quotations test3 = "The DUP is similar to the \"religious right\" in the United States and takes a ``hardline'' stance on social issues" expected = [ (0, 3), (4, 7), (8, 10), (11, 18), (19, 21), (22, 25), (26, 27), (27, 36), (37, 42), (42, 43), (44, 46), (47, 50), (51, 57), (58, 64), (65, 68), (69, 74), (75, 76), (77, 79), (79, 87), (87, 89), (90, 96), (97, 99), (100, 106), (107, 113), ] result = list(tokenizer.span_tokenize(test3)) assert result == expected def test_word_tokenize(self): """ Test word_tokenize function """ sentence = "The 'v', I've been fooled but I'll seek revenge." expected = ['The', "'", 'v', "'", ',', 'I', "'ve", 'been', 'fooled', 'but', 'I', "'ll", 'seek', 'revenge', '.'] assert word_tokenize(sentence) == expected sentence = "'v' 're'" expected = ["'", 'v', "'", "'re", "'"] assert word_tokenize(sentence) == expected def test_punkt_pair_iter(self): test_cases = [ ('12', [('1', '2'), ('2', None)]), ('123', [('1', '2'), ('2', '3'), ('3', None)]), ('1234', [('1', '2'), ('2', '3'), ('3', '4'), ('4', None)]), ] for (test_input, expected_output) in test_cases: actual_output = [x for x in punkt._pair_iter(test_input)] assert actual_output == expected_output def test_punkt_pair_iter_handles_stop_iteration_exception(self): # test input to trigger StopIteration from next() it = iter([]) # call method under test and produce a generator gen = punkt._pair_iter(it) # unpack generator, ensure that no error is raised list(gen) def test_punkt_tokenize_words_handles_stop_iteration_exception(self): obj = punkt.PunktBaseClass() class TestPunktTokenizeWordsMock: def word_tokenize(self, s): return iter([]) obj._lang_vars = TestPunktTokenizeWordsMock() # unpack generator, ensure that no error is raised list(obj._tokenize_words('test')) def test_punkt_tokenize_custom_lang_vars(self): # Create LangVars including a full stop end character as used in Bengali class BengaliLanguageVars(punkt.PunktLanguageVars): sent_end_chars = ('.', '?', '!', '\u0964') obj = punkt.PunktSentenceTokenizer(lang_vars = BengaliLanguageVars()) # We now expect these sentences to be split up into the individual sentences sentences = u"উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।" expected = ["উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন।", "অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন।", "এই উপলক্ষ্যে উপরাষ্ট্রপতি 
হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"] assert obj.tokenize(sentences) == expected def test_punkt_tokenize_no_custom_lang_vars(self): obj = punkt.PunktSentenceTokenizer() # We expect these sentences to not be split properly, as the Bengali full stop '।' is not included in the default language vars sentences = u"উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।" expected = ["উপরাষ্ট্রপতি শ্রী এম ভেঙ্কাইয়া নাইডু সোমবার আই আই টি দিল্লির হীরক জয়ন্তী উদযাপনের উদ্বোধন করেছেন। অনলাইনের মাধ্যমে এই অনুষ্ঠানে কেন্দ্রীয় মানব সম্পদ উন্নয়নমন্ত্রী শ্রী রমেশ পোখরিয়াল ‘নিশাঙ্ক’ উপস্থিত ছিলেন। এই উপলক্ষ্যে উপরাষ্ট্রপতি হীরকজয়ন্তীর লোগো এবং ২০৩০-এর জন্য প্রতিষ্ঠানের লক্ষ্য ও পরিকল্পনার নথি প্রকাশ করেছেন।"] assert obj.tokenize(sentences) == expected
nltk/test/unit/test_tokenize.py
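The custom-language-variables pattern exercised in test_punkt_tokenize_custom_lang_vars generalizes to any extra sentence terminator. A standalone sketch, assuming only that nltk is installed (an untrained PunktSentenceTokenizer suffices, exactly as in the test itself):

from nltk.tokenize import punkt

class SemicolonLanguageVars(punkt.PunktLanguageVars):
    # add ';' to the default sentence-ending characters
    sent_end_chars = ('.', '?', '!', ';')

tokenizer = punkt.PunktSentenceTokenizer(lang_vars=SemicolonLanguageVars())
text = "The build failed; The logs are attached. Please rerun the job!"
print(tokenizer.tokenize(text))  # expect an extra split at ';'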
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import torch from detr.models.backbone import Backbone, Joiner from detr.models.detr import DETR, PostProcess from detr.models.position_encoding import PositionEmbeddingSine from detr.models.segmentation import DETRsegm, PostProcessPanoptic from detr.models.transformer import Transformer dependencies = ["torch", "torchvision"] def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False): hidden_dim = 256 backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation) pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True) backbone_with_pos_enc = Joiner(backbone, pos_enc) backbone_with_pos_enc.num_channels = backbone.num_channels transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True) detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100) if mask: return DETRsegm(detr) return detr def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False): """ DETR R50 with 6 encoder and 6 decoder layers. Achieves 42/62.4 AP/AP50 on COCO val5k. """ model = _make_detr("resnet50", dilation=False, num_classes=num_classes) if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcess() return model def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False): """ DETR-DC5 R50 with 6 encoder and 6 decoder layers. The last block of ResNet-50 has dilation to increase output resolution. Achieves 43.3/63.1 AP/AP50 on COCO val5k. """ model = _make_detr("resnet50", dilation=True, num_classes=num_classes) if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcess() return model def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False): """ DETR R101 with 6 encoder and 6 decoder layers. Achieves 43.5/63.8 AP/AP50 on COCO val5k. """ model = _make_detr("resnet101", dilation=False, num_classes=num_classes) if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcess() return model def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False): """ DETR-DC5 R101 with 6 encoder and 6 decoder layers. The last block of ResNet-101 has dilation to increase output resolution. Achieves 44.9/64.7 AP/AP50 on COCO val5k. """ model = _make_detr("resnet101", dilation=True, num_classes=num_classes) if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcess() return model def detr_resnet50_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False ): """ DETR R50 with 6 encoder and 6 decoder layers. Achieves 43.4 PQ on COCO val5k.
threshold is the minimum confidence required for keeping segments in the prediction """ model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) is_thing_map = {i: i <= 90 for i in range(250)} if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", map_location="cpu", check_hash=True, ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcessPanoptic(is_thing_map, threshold=threshold) return model def detr_resnet50_dc5_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False ): """ DETR-DC5 R50 with 6 encoder and 6 decoder layers. The last block of ResNet-50 has dilation to increase output resolution. Achieves 44.6 PQ on COCO val5k. threshold is the minimum confidence required for keeping segments in the prediction """ model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) is_thing_map = {i: i <= 90 for i in range(250)} if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", map_location="cpu", check_hash=True, ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcessPanoptic(is_thing_map, threshold=threshold) return model def detr_resnet101_panoptic( pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False ): """ DETR R101 with 6 encoder and 6 decoder layers. Achieves 45.1 PQ on COCO val5k. threshold is the minimum confidence required for keeping segments in the prediction """ model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) is_thing_map = {i: i <= 90 for i in range(250)} if pretrained: checkpoint = torch.hub.load_state_dict_from_url( url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", map_location="cpu", check_hash=True, ) model.load_state_dict(checkpoint["model"]) if return_postprocessor: return model, PostProcessPanoptic(is_thing_map, threshold=threshold) return model
detr/hubconf.py
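These entrypoints are meant to be consumed through torch.hub. A usage sketch, assuming network access and that this file lives in the upstream 'facebookresearch/detr' hub repo where these weights are published:

import torch

# load the R50 model together with its box post-processor
model, postprocess = torch.hub.load(
    'facebookresearch/detr', 'detr_resnet50',
    pretrained=True, return_postprocessor=True,
)
model.eval()

img = torch.rand(1, 3, 800, 1066)  # stand-in for a normalized image batch
with torch.no_grad():
    outputs = model(img)

# PostProcess rescales the 100 per-query predictions to absolute pixel boxes
target_sizes = torch.tensor([[800, 1066]])  # (height, width) per image
results = postprocess(outputs, target_sizes)
print(results[0]['scores'].shape, results[0]['boxes'].shape)  # (100,) (100, 4)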
""" 'storage-add ' sub command """ #To prevent Py2 to interpreting print(val) as a tuple. from __future__ import print_function import os import tempfile import sys import json import utils from storage_yaml import to_storage_yaml # noqa # pylint: disable=too-many-branches def set_args(name, subparsers): """ add arguments, and their options """ parser = subparsers.add_parser(name) arg = parser.add_argument arg( "name", help="Storage Name" ) arg( "--type", help="Storage Type", choices=["Replica1", "Replica3", "External", "Replica2"], default=None ) arg( "--device", help=("Storage device in <node>:<device> format, " "Example: --device kube1.example.com:/dev/vdc"), default=[], action="append" ) arg( "--path", help=("Storage path in <node>:<path> format, " "Example: --path kube1.example.com:/exports/data"), default=[], action="append" ) arg( "--pvc", help="Storage from pvc, Example: --pvc local-pvc-1", default=[], action="append" ) arg( "--external", help="Storage from external gluster, Example: --external gluster-node:/gluster-volname", default=None ) arg( "--tiebreaker", help="If type is 'Replica2', one can have a tiebreaker node along " "with it. like '--tiebreaker tie-breaker-node-name:/data/tiebreaker'", default=None ) utils.add_global_flags(parser) def validate(args): """ validate arguments """ if args.external is not None: if args.type and args.type != "External": print("'--external' option is used only with '--type External'", file=sys.stderr) sys.exit(1) if ":" not in args.external: print("Invalid external storage details. Please specify " "details in the format <node>:/<volname>", file=sys.stderr) sys.exit(1) # Set type to External as '--external' option is provided args.type = "External" if args.tiebreaker: if args.type != "Replica2": print("'--tiebreaker' option should be used only with " "type 'Replica2'", file=sys.stderr) sys.exit(1) if ":" not in args.tiebreaker: print("Invalid tiebreaker details. Please specify details " "in the format <node>:/<path>", file=sys.stderr) sys.exit(1) else: args.tiebreaker = "tie-breaker.kadalu.io:/mnt" if not args.type: args.type = "Replica1" num_storages = (len(args.device) + len(args.path) + len(args.pvc)) or \ (1 if args.external is not None else 0) if num_storages == 0: print("Please specify at least one storage", file=sys.stderr) sys.exit(1) # pylint: disable=too-many-boolean-expressions if ((args.type == "Replica1" and num_storages != 1) or (args.type == "Replica2" and num_storages != 2) or (args.type == "Replica3" and num_storages != 3)): print("Number of storages not matching for type=%s" % args.type, file=sys.stderr) sys.exit(1) kube_nodes = get_kube_nodes(args) for dev in args.device: if ":" not in dev: print("Invalid storage device details. Please specify device " "details in the format <node>:<device>", file=sys.stderr) sys.exit(1) if (not args.dry_run) and (dev.split(":")[0] not in kube_nodes): print("Node name does not appear to be valid: " + dev) sys.exit(1) for path in args.path: if ":" not in path: print("Invalid storage path details. 
Please specify path " "details in the format <node>:<path>", file=sys.stderr) sys.exit(1) if (not args.dry_run) and (path.split(":")[0] not in kube_nodes): print("Node name does not appear to be valid: " + path) sys.exit(1) def get_kube_nodes(args): """ gets all nodes """ if args.dry_run: return [] cmd = utils.kubectl_cmd(args) + ["get", "nodes", "-ojson"] try: resp = utils.execute(cmd) data = json.loads(resp.stdout) nodes = [] for nodedata in data["items"]: nodes.append(nodedata["metadata"]["name"]) print("The following nodes are available:\n %s" % ", ".join(nodes)) print() return nodes except utils.CommandError as err: utils.command_error(cmd, err.stderr) except FileNotFoundError: utils.kubectl_cmd_help(args.kubectl_cmd) def storage_add_data(args): """ Build the config file """ content = { "apiVersion": "kadalu-operator.storage/v1alpha1", "kind": "KadaluStorage", "metadata": { "name": args.name }, "spec": { "type": args.type, "storage": [] } } # External details are specified, no 'storage' section required if args.external: node, vol = args.external.split(":") content["spec"]["details"] = [ { "gluster_host": node, "gluster_volname": vol.strip("/") } ] return content # Everything below can be provided for a 'Replica3' setup. # Or two types of data can be provided for 'Replica2'. # So, return only at the end. # Device details are specified if args.device: for devdata in args.device: node, dev = devdata.split(":") content["spec"]["storage"].append( { "node": node, "device": dev } ) # If Path is specified if args.path: for pathdata in args.path: node, path = pathdata.split(":") content["spec"]["storage"].append( { "node": node, "path": path } ) # If PVC is specified if args.pvc: for pvc in args.pvc: content["spec"]["storage"].append( { "pvc": pvc } ) # TODO: Support for different port can be added later if args.type == "Replica2": node, path = args.tiebreaker.split(":") content["spec"]["tiebreaker"] = { "node": node, "path": path, "port": 24007 } return content def run(args): """ Adds the subcommand arguments back to main CLI tool """ data = storage_add_data(args) yaml_content = to_storage_yaml(data) print("Storage Yaml file for your reference:\n") print(yaml_content) if args.dry_run: return if not args.script_mode: answer = "" valid_answers = ["yes", "no", "n", "y"] while answer not in valid_answers: answer = input("Is this correct?(Yes/No): ") answer = answer.strip().lower() if answer in ["n", "no"]: return config, tempfile_path = tempfile.mkstemp(prefix="kadalu") try: with os.fdopen(config, 'w') as tmp: tmp.write(yaml_content) cmd = utils.kubectl_cmd(args) + ["create", "-f", tempfile_path] resp = utils.execute(cmd) print("Storage add request sent successfully") print(resp.stdout) print() except utils.CommandError as err: os.remove(tempfile_path) utils.command_error(cmd, err.stderr) except FileNotFoundError: os.remove(tempfile_path) utils.kubectl_cmd_help(args.kubectl_cmd) finally: if os.path.exists(tempfile_path): os.remove(tempfile_path)
cli/kubectl_kadalu/storage_add.py
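storage_add_data() can be exercised without a cluster by handing it a stand-in for the parsed CLI arguments. A sketch with hypothetical values, assuming the module is importable as storage_add:

from argparse import Namespace

from storage_add import storage_add_data  # hypothetical import path

args = Namespace(
    name="storage-pool-1", type="Replica3",
    external=None, tiebreaker=None,
    device=["kube1:/dev/vdc", "kube2:/dev/vdc", "kube3:/dev/vdc"],
    path=[], pvc=[],
)
cfg = storage_add_data(args)
print(cfg["spec"]["type"])     # Replica3
print(cfg["spec"]["storage"])  # three {"node": ..., "device": ...} entries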
# Copyright 2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=protected-access r""" This module contains the abstract base classes for defining PennyLane operations and observables. Description ----------- Qubit Operations ~~~~~~~~~~~~~~~~ The :class:`Operator` class serves as a base class for operators, and is inherited by both the :class:`Observable` class and the :class:`Operation` class. These classes are subclassed to implement quantum operations and measure observables in PennyLane. * Each :class:`~.Operator` subclass represents a general type of map between physical states. Each instance of these subclasses represents either - an application of the operator or - an instruction to measure and return the respective result. Operators act on a sequence of wires (subsystems) using given parameter values. * Each :class:`~.Operation` subclass represents a type of quantum operation, for example a unitary quantum gate. Each instance of these subclasses represents an application of the operation with given parameter values to a given sequence of wires (subsystems). * Each :class:`~.Observable` subclass represents a type of physical observable. Each instance of these subclasses represents an instruction to measure and return the respective result for the given parameter values on a sequence of wires (subsystems). Differentiation ^^^^^^^^^^^^^^^ In general, an :class:`Operation` is differentiable (at least using the finite-difference method) with respect to a parameter iff * the domain of that parameter is continuous. For an :class:`Operation` to be differentiable with respect to a parameter using the analytic method of differentiation, it must satisfy an additional constraint: * the parameter domain must be real. .. note:: These conditions are *not* sufficient for analytic differentiation. For example, CV gates must also define a matrix representing their Heisenberg linear transformation on the quadrature operators. For gates that *are* supported via the analytic method, the gradient recipe works as follows: .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k+s_i). where :math:`f` is the expectation value of an observable on a circuit that has been evolved by the operation being considered with parameter :math:`\phi_k`, there are multiple terms indexed with :math:`i` for each parameter :math:`\phi` and the :math:`[c_i, a_i, s_i]` are coefficients specific to the gate. The following specific case holds for example for qubit operations that are generated by one of the Pauli matrices and results in an overall positive and negative shift: .. math:: \frac{\partial}{\partial\phi_k}f = \frac{1}{2}\left[f \left( \phi_k+\frac{\pi}{2} \right) - f \left( \phi_k-\frac{\pi}{2} \right)\right], i.e., so that :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]`. 
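A standalone numerical check of this two-term recipe (an illustrative NumPy sketch, not part of the module): for an RX rotation, which is generated by the Pauli-X matrix and measured in Pauli-Z, the exact expectation is f(phi) = cos(phi), and the two pi/2-shifted evaluations reproduce its derivative -sin(phi) exactly.

import numpy as np

def f(phi):
    # <0| RX(phi)^dag Z RX(phi) |0> = cos(phi)
    rx = np.array([[np.cos(phi / 2), -1j * np.sin(phi / 2)],
                   [-1j * np.sin(phi / 2), np.cos(phi / 2)]])
    state = rx @ np.array([1.0, 0.0])
    return np.real(state.conj() @ np.diag([1.0, -1.0]) @ state)

phi = 0.37
grad = 0.5 * (f(phi + np.pi / 2) - f(phi - np.pi / 2))
print(grad, -np.sin(phi))  # identical up to floating-point error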
CV Operation base classes ~~~~~~~~~~~~~~~~~~~~~~~~~ Due to additional requirements, continuous-variable (CV) operations must subclass the :class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation` and :class:`~.Observable`. Differentiation ^^^^^^^^^^^^^^^ To enable gradient computation using the analytic method for Gaussian CV operations, in addition, you need to provide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of the operation given its list of parameters, namely: * For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the operation on the vector of quadrature operators :math:`\mathbf{r}` for the given parameter values. * For Gaussian CV Observables this method should return a real vector (first-order observables) or symmetric matrix (second-order observables) of coefficients of the quadrature operators :math:`\x` and :math:`\p`. PennyLane uses the convention :math:`\mathbf{r} = (\I, \x, \p)` for single-mode operations and observables and :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)` for multi-mode operations and observables. .. note:: Non-Gaussian CV operations and observables are currently only supported via the finite-difference method of gradient computation. """ import abc import copy import itertools import functools import numbers from enum import Enum, IntEnum import numpy as np from numpy.linalg import multi_dot import pennylane as qml from pennylane.wires import Wires from .utils import pauli_eigs from .variable import Variable # ============================================================================= # Wire types # ============================================================================= class WiresEnum(IntEnum): """Integer enumeration class to represent the number of wires an operation acts on""" AnyWires = -1 AllWires = 0 AllWires = WiresEnum.AllWires """IntEnum: An enumeration which represents all wires in the subsystem. It is equivalent to an integer with value 0.""" AnyWires = WiresEnum.AnyWires """IntEnum: An enumeration which represents any wires in the subsystem. 
It is equivalent to an integer with value -1.""" # ============================================================================= # ObservableReturnTypes types # ============================================================================= class ObservableReturnTypes(Enum): """Enumeration class to represent the return types of an observable.""" Sample = "sample" Variance = "var" Expectation = "expval" Probability = "probs" State = "state" def __repr__(self): """String representation of the return types.""" return str(self.value) Sample = ObservableReturnTypes.Sample """Enum: An enumeration which represents sampling an observable.""" Variance = ObservableReturnTypes.Variance """Enum: An enumeration which represents returning the variance of an observable on specified wires.""" Expectation = ObservableReturnTypes.Expectation """Enum: An enumeration which represents returning the expectation value of an observable on specified wires.""" Probability = ObservableReturnTypes.Probability """Enum: An enumeration which represents returning probabilities of all computational basis states.""" State = ObservableReturnTypes.State """Enum: An enumeration which represents returning the state in the computational basis.""" # ============================================================================= # Class property # ============================================================================= class ClassPropertyDescriptor: # pragma: no cover """Allows a class property to be defined""" # pylint: disable=too-few-public-methods def __init__(self, fget, fset=None): self.fget = fget self.fset = fset def __get__(self, obj, klass=None): if klass is None: klass = type(obj) return self.fget.__get__(obj, klass)() def __set__(self, obj, value): if not self.fset: raise AttributeError("can't set attribute") type_ = type(obj) return self.fset.__get__(obj, type_)(value) def setter(self, func): """Set the function as a class method, and store as an attribute.""" if not isinstance(func, (classmethod, staticmethod)): func = classmethod(func) self.fset = func return self def classproperty(func): """The class property decorator""" if not isinstance(func, (classmethod, staticmethod)): func = classmethod(func) return ClassPropertyDescriptor(func) # ============================================================================= # Base Operator class # ============================================================================= class Operator(abc.ABC): r"""Base class for quantum operators supported by a device. The following class attributes must be defined for all Operators: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` Args: params (tuple[float, int, array, Variable]): operator parameters Keyword Args: wires (Iterable[Number, str], Number, str, Wires): Wires that the operator acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operator should be immediately pushed into the Operator queue. """ do_check_domain = True #: bool: flag: should we perform a domain check for the parameters? 
def __copy__(self): cls = self.__class__ copied_op = cls.__new__(cls) copied_op.data = self.data.copy() copied_op._wires = self.wires copied_op._name = self._name if hasattr(self, "_inverse"): copied_op._inverse = self._inverse return copied_op def __deepcopy__(self, memo): cls = self.__class__ copied_op = cls.__new__(cls) # The memo dict maps object ID to object, and is required by # the deepcopy function to keep track of objects it has already # deep copied. memo[id(self)] = copied_op for attribute, value in self.__dict__.items(): if attribute == "data": # Shallow copy the list of parameters. We avoid a deep copy # here, since PyTorch does not support deep copying of tensors # within a differentiable computation. copied_op.data = value.copy() else: # Deep copy everything else. setattr(copied_op, attribute, copy.deepcopy(value, memo)) return copied_op @classmethod def _matrix(cls, *params): """Matrix representation of the operator in the computational basis. This is a *class method* that should be defined for all new operations and observables, that returns the matrix representing the operator in the computational basis. This private method allows matrices to be computed directly without instantiating the operators first. To return the matrices of *instantiated* operators, please use the :attr:`~.Operator.matrix` property instead. **Example:** >>> qml.RY._matrix(0.5) >>> array([[ 0.96891242+0.j, -0.24740396+0.j], [ 0.24740396+0.j, 0.96891242+0.j]]) Returns: array: matrix representation """ raise NotImplementedError @property def matrix(self): r"""Matrix representation of an instantiated operator in the computational basis. **Example:** >>> U = qml.RY(0.5, wires=1) >>> U.matrix >>> array([[ 0.96891242+0.j, -0.24740396+0.j], [ 0.24740396+0.j, 0.96891242+0.j]]) Returns: array: matrix representation """ return self._matrix(*self.parameters) @classmethod def _eigvals(cls, *params): """Eigenvalues of the operator. This is a *class method* that should be defined for all new operations and observables that returns the eigenvalues of the operator. Note that the eigenvalues are not guaranteed to be in any particular order. This private method allows eigenvalues to be computed directly without instantiating the operators first. The default implementation relies on the presence of the :attr:`_matrix` method. To return the eigenvalues of *instantiated* operators, please use the :attr:`~.Operator.eigvals` property instead. **Example:** >>> qml.RZ._eigvals(0.5) >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigenvalue representation """ return np.linalg.eigvals(cls._matrix(*params)) @property def eigvals(self): r"""Eigenvalues of an instantiated operator. Note that the eigenvalues are not guaranteed to be in any particular order. **Example:** >>> U = qml.RZ(0.5, wires=1) >>> U.eigvals >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigvals representation """ return self._eigvals(*self.parameters) @property @abc.abstractmethod def num_params(self): """Number of parameters the operator takes.""" @property @abc.abstractmethod def num_wires(self): """Number of wires the operator acts on.""" @property @abc.abstractmethod def par_domain(self): """Domain of the gate parameters. * ``'N'``: natural numbers (including zero). * ``'R'``: floats. * ``'A'``: arrays of real or complex values. * ``'L'``: list of arrays of real or complex values. * ``None``: if there are no parameters. 
""" @property def name(self): """String for the name of the operator.""" return self._name @name.setter def name(self, value): self._name = value def __init__(self, *params, wires=None, do_queue=True): # pylint: disable=too-many-branches self._name = self.__class__.__name__ #: str: name of the operator self.queue_idx = None #: int, None: index of the Operator in the circuit queue, or None if not in a queue if wires is None: raise ValueError("Must specify the wires that {} acts on".format(self.name)) if isinstance(wires, Wires): self._wires = wires else: self._wires = Wires(wires) #: Wires: wires on which the operator acts # check that the number of wires given corresponds to required number if ( self.num_wires != AllWires and self.num_wires != AnyWires and len(self._wires) != self.num_wires ): raise ValueError( "{}: wrong number of wires. " "{} wires given, {} expected.".format(self.name, len(self._wires), self.num_wires) ) if len(params) != self.num_params: raise ValueError( "{}: wrong number of parameters. " "{} parameters passed, {} expected.".format(self.name, len(params), self.num_params) ) # check the validity of the params if self.do_check_domain: for p in params: self.check_domain(p) self.data = list(params) #: list[Any]: parameters of the operator if do_queue: self.queue() def __repr__(self): """Constructor-call-like representation.""" # FIXME using self.parameters here instead of self.data is dangerous, it assumes the data can be evaluated # which is only true if something suitable happens to remain in VariableRef.positional_arg_values etc. after # the last evaluation. if self.parameters: params = ", ".join([repr(p) for p in self.parameters]) return "{}({}, wires={})".format(self.name, params, self.wires.tolist()) return "{}(wires={})".format(self.name, self.wires.tolist()) def check_domain(self, p, flattened=False): """Check the validity of a parameter. :class:`.Variable` instances can represent any real scalars (but not arrays). Args: p (Number, array, Variable): parameter to check flattened (bool): True means p is an element of a flattened parameter sequence (affects the handling of 'A' parameters) Raises: TypeError: parameter is not an element of the expected domain ValueError: parameter is an element of an unknown domain Returns: Number, array, Variable: p """ # pylint: disable=too-many-branches # If parameter is a NumPy scalar, convert it into a Python scalar. 
if isinstance(p, np.ndarray) and p.ndim == 0: p = p.item() if isinstance(p, Variable): if self.par_domain == "A": raise TypeError( "{}: Array parameter expected, got a Variable, " "which can only represent real scalars.".format(self.name) ) return p # p is not a Variable if self.par_domain == "A": if flattened: if isinstance(p, np.ndarray): raise TypeError( "{}: Flattened array parameter expected, got {}.".format(self.name, type(p)) ) else: if not isinstance(p, np.ndarray): raise TypeError( "{}: Array parameter expected, got {}.".format(self.name, type(p)) ) elif self.par_domain in ("R", "N"): if not isinstance(p, numbers.Real): raise TypeError( "{}: Real scalar parameter expected, got {}.".format(self.name, type(p)) ) if self.par_domain == "N": if not isinstance(p, numbers.Integral): raise TypeError( "{}: Natural number parameter expected, got {}.".format(self.name, type(p)) ) if p < 0: raise TypeError( "{}: Natural number parameter expected, got {}.".format(self.name, p) ) elif self.par_domain == "L": if not isinstance(p, list): raise TypeError("{}: List parameter expected, got {}.".format(self.name, type(p))) if not all(isinstance(elem, np.ndarray) for elem in p): raise TypeError("List elements must be Numpy arrays.") else: raise ValueError( "{}: Unknown parameter domain '{}'.".format(self.name, self.par_domain) ) return p @property def wires(self): """Wires of this operator. Returns: Wires: wires """ return self._wires @property def parameters(self): """Current parameter values. Fixed parameters are returned as is, free parameters represented by :class:`.Variable` instances are replaced by their current numerical value. Returns: list[Any]: parameter values """ # TODO profiling def evaluate(p): """Evaluate a single parameter.""" if isinstance(p, np.ndarray): # object arrays may have Variables inside them if p.dtype == object: temp = np.array([x.val if isinstance(x, Variable) else x for x in p.flat]) return temp.reshape(p.shape) return p if isinstance(p, list): # p is assumed to be a list of numpy arrays # object arrays may have Variables inside them evaled_list = [] for arr in p: if arr.dtype == object: temp = np.array([x.val if isinstance(x, Variable) else x for x in arr.flat]) evaled_list.append(temp.reshape(arr.shape)) return evaled_list return p if isinstance(p, Variable): p = self.check_domain(p.val) return p return [evaluate(p) for p in self.data] def queue(self): """Append the operator to the Operator queue.""" qml.QueuingContext.append(self) return self # so pre-constructed Observable instances can be queued and returned in a single statement # ============================================================================= # Base Operation class # ============================================================================= class Operation(Operator): r"""Base class for quantum operations supported by a device. As with :class:`~.Operator`, the following class attributes must be defined for all operations: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` The following two class attributes are optional, but in most cases should be clearly defined to avoid unexpected behavior during differentiation. 
* :attr:`~.Operation.grad_method` * :attr:`~.Operation.grad_recipe` Finally, there are some additional optional class attributes that may be set, and used by certain quantum optimizers: * :attr:`~.Operation.generator` Args: params (tuple[float, int, array, Variable]): operation parameters Keyword Args: wires (Sequence[int]): Subsystems it acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operation should be immediately pushed into a :class:`BaseQNode` circuit queue. This flag is useful if there is some reason to run an Operation outside of a BaseQNode context. """ # pylint: disable=abstract-method string_for_inverse = ".inv" @property def grad_method(self): """Gradient computation method. * ``'A'``: analytic differentiation using the parameter-shift method. * ``'F'``: finite difference numerical differentiation. * ``None``: the operation may not be differentiated. Default is ``'F'``, or ``None`` if the Operation has zero parameters. """ return None if self.num_params == 0 else "F" grad_recipe = None r"""tuple(Union(list[list[float]], None)) or None: Gradient recipe for the parameter-shift method. This is a tuple with one nested list per operation parameter. For parameter :math:`\phi_k`, the nested list contains elements of the form :math:`[c_i, a_i, s_i]` where :math:`i` is the index of the term, resulting in a gradient recipe of .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k + s_i). If ``None``, the default gradient recipe containing the two terms :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter. """ def get_parameter_shift(self, idx, shift=np.pi / 2): """Multiplier and shift for the given parameter, based on its gradient recipe. Args: idx (int): parameter index Returns: float, float: multiplier, shift """ # get the gradient recipe for this parameter recipe = self.grad_recipe[idx] # Default values multiplier = 0.5 / np.sin(shift) a = 1 # We set the default recipe following: # ∂f(x) = c*f(x+s) - c*f(x-s) # where we express a positive and a negative shift by default default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]] param_shift = default_param_shift if recipe is None else recipe if hasattr(self.data[idx], "mult"): # Parameter is a variable, we are in non-tape mode # Need to use the internal multiplier in the Variable to update the # multiplier and the shift var_mult = self.data[idx].mult for elem in param_shift: # Update the multiplier elem[0] *= var_mult if var_mult != 0: # Update the shift # zero multiplier means the shift is unimportant elem[2] /= var_mult return param_shift @property def generator(self): r"""Generator of the operation. A length-2 list ``[generator, scaling_factor]``, where * ``generator`` is an existing PennyLane operation class or :math:`2\times 2` Hermitian array that acts as the generator of the current operation * ``scaling_factor`` represents a scaling factor applied to the generator operation For example, if :math:`U(\theta)=e^{i0.7\theta \sigma_x}`, then :math:`\sigma_x`, with scaling factor :math:`s`, is the generator of operator :math:`U(\theta)`: .. code-block:: python generator = [PauliX, 0.7] Default is ``[None, 1]``, indicating the operation has no generator. 
""" return [None, 1] @property def inverse(self): """Boolean determining if the inverse of the operation was requested.""" return self._inverse @inverse.setter def inverse(self, boolean): self._inverse = boolean @staticmethod def decomposition(*params, wires): """Returns a template decomposing the operation into other quantum operations.""" raise NotImplementedError def inv(self): """Inverts the operation, such that the inverse will be used for the computations by the specific device. This method concatenates a string to the name of the operation, to indicate that the inverse will be used for computations. Any subsequent call of this method will toggle between the original operation and the inverse of the operation. Returns: :class:`Operator`: operation to be inverted """ self.inverse = not self._inverse return self @property def matrix(self): op_matrix = self._matrix(*self.parameters) if self.inverse: return op_matrix.conj().T return op_matrix @property def eigvals(self): op_eigvals = self._eigvals(*self.parameters) if self.inverse: return op_eigvals.conj() return op_eigvals @property def base_name(self): """Get base name of the operator.""" return self.__class__.__name__ @property def name(self): """Get and set the name of the operator.""" return self._name + Operation.string_for_inverse if self.inverse else self._name def __init__(self, *params, wires=None, do_queue=True): self._inverse = False # check the grad_method validity if self.par_domain == "N": assert ( self.grad_method is None ), "An operation may only be differentiated with respect to real scalar parameters." elif self.par_domain == "A": assert self.grad_method in ( None, "F", ), "Operations that depend on arrays containing free variables may only be differentiated using the F method." # check the grad_recipe validity if self.grad_method == "A": if self.grad_recipe is None: # default recipe for every parameter self.grad_recipe = [None] * self.num_params else: assert ( len(self.grad_recipe) == self.num_params ), "Gradient recipe must have one entry for each parameter!" else: assert self.grad_recipe is None, "Gradient recipe is only used by the A method!" super().__init__(*params, wires=wires, do_queue=do_queue) class DiagonalOperation(Operation): r"""Base class for diagonal quantum operations supported by a device. As with :class:`~.Operation`, the following class attributes must be defined for all operations: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` The following two class attributes are optional, but in most cases should be clearly defined to avoid unexpected behavior during differentiation. * :attr:`~.Operation.grad_method` * :attr:`~.Operation.grad_recipe` Finally, there are some additional optional class attributes that may be set, and used by certain quantum optimizers: * :attr:`~.Operation.generator` Args: params (tuple[float, int, array, Variable]): operation parameters Keyword Args: wires (Sequence[int]): Subsystems it acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operation should be immediately pushed into a :class:`BaseQNode` circuit queue. This flag is useful if there is some reason to run an Operation outside of a BaseQNode context. """ # pylint: disable=abstract-method @classmethod def _eigvals(cls, *params): """Eigenvalues of the operator. The order of the eigenvalues needs to match the order of the computational basis vectors. 
        This is a *class method* that must be defined for all new diagonal
        operations; it returns the eigenvalues of the operator in the
        computational basis.

        This private method allows eigenvalues to be computed directly
        without instantiating the operators first.

        To return the eigenvalues of *instantiated* operators, please use
        the :attr:`~.Operator.eigvals` property instead.

        **Example:**

        >>> qml.RZ._eigvals(0.5)
        array([0.96891242-0.24740396j, 0.96891242+0.24740396j])

        Returns:
            array: eigenvalue representation
        """
        raise NotImplementedError

    @property
    def eigvals(self):
        r"""Eigenvalues of an instantiated diagonal operation.

        The order of the eigenvalues needs to match the order of
        the computational basis vectors.

        **Example:**

        >>> U = qml.RZ(0.5, wires=1)
        >>> U.eigvals
        array([0.96891242-0.24740396j, 0.96891242+0.24740396j])

        Returns:
            array: eigvals representation
        """
        return super().eigvals

    @classmethod
    def _matrix(cls, *params):
        return np.diag(cls._eigvals(*params))


class Channel(Operation, abc.ABC):
    r"""Base class for quantum channels.

    As with :class:`~.Operation`, the following class attributes must be
    defined for all channels:

    * :attr:`~.Operator.num_params`
    * :attr:`~.Operator.num_wires`
    * :attr:`~.Operator.par_domain`

    To define a noisy channel, the following attribute of :class:`~.Channel`
    can be used to list the corresponding Kraus matrices.

    * :attr:`~.Channel._kraus_matrices`

    The following two class attributes are optional, but in most cases
    should be clearly defined to avoid unexpected behavior during
    differentiation.

    * :attr:`~.Operation.grad_method`
    * :attr:`~.Operation.grad_recipe`

    Args:
        params (tuple[float, int, array, Variable]): operation parameters

    Keyword Args:
        wires (Sequence[int]): Subsystems the channel acts on. If not given,
            args[-1] is interpreted as wires.
        do_queue (bool): Indicates whether the operation should be
            immediately pushed into a :class:`BaseQNode` circuit queue.
            This flag is useful if there is some reason to run an Operation
            outside of a BaseQNode context.
    """
    # pylint: disable=abstract-method

    @classmethod
    @abc.abstractmethod
    def _kraus_matrices(cls, *params):
        """Kraus matrices representing a quantum channel, specified in
        the computational basis.

        This is a class method that should be defined for all new channels.
        It returns the Kraus matrices representing the channel in the
        computational basis.

        This private method allows matrices to be computed directly without
        instantiating the channel first.

        **Example**

        >>> qml.AmplitudeDamping._kraus_matrices(0.1)
        [array([[1.       , 0.       ],
                [0.       , 0.9486833]]),
         array([[0.        , 0.31622777],
                [0.        , 0.        ]])]

        To return the Kraus matrices of an *instantiated* channel, please
        use the :attr:`~.Operator.kraus_matrices` property instead.

        Returns:
            list(array): list of Kraus matrices
        """
        raise NotImplementedError

    @property
    def kraus_matrices(self):
        r"""Kraus matrices of an instantiated channel
        in the computational basis.

        **Example**

        >>> U = qml.AmplitudeDamping(0.1, wires=1)
        >>> U.kraus_matrices
        [array([[1.       , 0.       ],
                [0.       , 0.9486833]]),
         array([[0.        , 0.31622777],
                [0.        , 0.        ]])]

        Returns:
            list(array): list of Kraus matrices
        """
        return self._kraus_matrices(*self.parameters)


# =============================================================================
# Base Observable class
# =============================================================================


class Observable(Operator):
    """Base class for observables supported by a device.

    :class:`Observable` is used to describe Hermitian quantum observables.

    As with :class:`~.Operator`, the following class attributes must be
    defined for all observables:

    * :attr:`~.Operator.num_params`
    * :attr:`~.Operator.num_wires`
    * :attr:`~.Operator.par_domain`

    Args:
        params (tuple[float, int, array, Variable]): observable parameters

    Keyword Args:
        wires (Sequence[int]): subsystems it acts on.
            Currently, only one subsystem is supported.
        do_queue (bool): Indicates whether the operation should be
            immediately pushed into the Operator queue.
    """
    # pylint: disable=abstract-method
    return_type = None

    @classmethod
    def _eigvals(cls, *params):
        """Eigenvalues of the observable.

        The order of the eigenvalues needs to match the order of
        the computational basis vectors when the observable is
        diagonalized using :attr:`diagonalizing_gates`.

        This is a *class method* that must be defined for all new diagonal
        operations; it returns the eigenvalues of the operator in the
        computational basis.

        This private method allows eigenvalues to be computed directly
        without instantiating the operators first.

        To return the eigenvalues of *instantiated* operators, please use
        the :attr:`~.Operator.eigvals` property instead.

        **Example:**

        >>> qml.PauliZ._eigvals()
        array([1, -1])

        Returns:
            array: eigenvalue representation
        """
        raise NotImplementedError

    @property
    def eigvals(self):
        r"""Eigenvalues of an instantiated observable.

        The order of the eigenvalues needs to match the order of
        the computational basis vectors when the observable is
        diagonalized using :attr:`diagonalizing_gates`. This is a
        requirement for using qubit observables in quantum functions.

        **Example:**

        >>> U = qml.PauliZ(wires=1)
        >>> U.eigvals
        array([1, -1])

        Returns:
            array: eigvals representation
        """
        return super().eigvals

    def __init__(self, *params, wires=None, do_queue=True):
        # extract the arguments
        if wires is None:
            wires = params[-1]
            params = params[:-1]

        super().__init__(*params, wires=wires, do_queue=do_queue)

    def __repr__(self):
        """Constructor-call-like representation."""
        temp = super().__repr__()

        if self.return_type is None:
            return temp

        if self.return_type is Probability:
            return repr(self.return_type) + "(wires={})".format(self.wires.tolist())

        return repr(self.return_type) + "(" + temp + ")"

    def __matmul__(self, other):
        if isinstance(other, Tensor):
            return other.__rmatmul__(self)

        if isinstance(other, Observable):
            return Tensor(self, other)

        raise ValueError("Can only perform tensor products between observables.")

    def _obs_data(self):
        r"""Extracts the data from an Observable or Tensor and serializes it
        in an order-independent fashion.

        This allows for comparison between observables that are equivalent,
        but are expressed in different orders. For example,
        `qml.PauliX(0) @ qml.PauliZ(1)` and `qml.PauliZ(1) @ qml.PauliX(0)`
        are equivalent observables with different orderings.

        **Example**

        >>> tensor = qml.PauliX(0) @ qml.PauliZ(1)
        >>> print(tensor._obs_data())
        {("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())}
        """
        obs = Tensor(self).non_identity_obs
        tensor = set()

        for ob in obs:
            parameters = tuple(param.tostring() for param in ob.parameters)
            tensor.add((ob.name, ob.wires, parameters))

        return tensor

    def compare(self, other):
        r"""Compares with another :class:`~.Hamiltonian`, :class:`~.Tensor`,
        or :class:`~.Observable`, to determine if they are equivalent.

        Observables/Hamiltonians are equivalent if they represent the same
        operator (their matrix representations are equal), and they are
        defined on the same wires.

        .. Warning::

            The compare method does **not** check if the matrix representation
            of a :class:`~.Hermitian` observable is equal to an equivalent
            observable expressed in terms of Pauli matrices.
            To do so would require the matrix form of Hamiltonians and Tensors
            be calculated, which would drastically increase runtime.

        Returns:
            (bool): True if equivalent.

        **Examples**

        >>> ob1 = qml.PauliX(0) @ qml.Identity(1)
        >>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])
        >>> ob1.compare(ob2)
        True
        >>> ob1 = qml.PauliX(0)
        >>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)
        >>> ob1.compare(ob2)
        False
        """
        if isinstance(other, (Tensor, Observable)):
            return other._obs_data() == self._obs_data()

        if isinstance(other, qml.Hamiltonian):
            return other.compare(self)

        raise ValueError(
            "Can only compare an Observable/Tensor, and a Hamiltonian/Observable/Tensor."
        )

    def __add__(self, other):
        r"""The addition operation between Observables/Tensors/qml.Hamiltonian objects."""
        if isinstance(other, (Observable, Tensor)):
            return qml.Hamiltonian([1, 1], [self, other], simplify=True)

        if isinstance(other, qml.Hamiltonian):
            return other + self

        raise ValueError(f"Cannot add Observable and {type(other)}")

    def __mul__(self, a):
        r"""The scalar multiplication operation between a scalar and an Observable/Tensor."""
        if isinstance(a, (int, float)):
            return qml.Hamiltonian([a], [self], simplify=True)

        raise ValueError(f"Cannot multiply Observable by {type(a)}")

    __rmul__ = __mul__

    def __sub__(self, other):
        r"""The subtraction operation between Observables/Tensors/qml.Hamiltonian objects."""
        if isinstance(other, (Observable, Tensor, qml.Hamiltonian)):
            return self.__add__(other.__mul__(-1))
        raise ValueError(f"Cannot subtract {type(other)} from Observable")

    def diagonalizing_gates(self):
        r"""Returns the list of operations such that they
        diagonalize the observable in the computational basis.

        Returns:
            list(qml.Operation): A list of gates that diagonalize
            the observable in the computational basis.
        """
        raise NotImplementedError


class Tensor(Observable):
    """Container class representing tensor products of observables.

    To create a tensor, simply initiate it like so:

    >>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2]))

    You can also create a tensor from other Tensors:

    >>> T = Tensor(T, qml.PauliZ(4))

    The ``@`` symbol can be used as a tensor product operation:

    >>> T = qml.PauliX(0) @ qml.Hadamard(2)
    """

    # pylint: disable=abstract-method
    return_type = None
    tensor = True
    par_domain = None

    def __init__(self, *args):  # pylint: disable=super-init-not-called
        self._eigvals_cache = None
        self.obs = []

        for o in args:
            if isinstance(o, Tensor):
                self.obs.extend(o.obs)
            elif isinstance(o, Observable):
                self.obs.append(o)
            else:
                raise ValueError("Can only perform tensor products between observables.")

    def __copy__(self):
        cls = self.__class__
        copied_op = cls.__new__(cls)
        copied_op.obs = self.obs.copy()
        copied_op._eigvals_cache = self._eigvals_cache
        return copied_op

    def __repr__(self):
        """Constructor-call-like representation."""
        s = " @ ".join([repr(o) for o in self.obs])

        if self.return_type is None:
            return s

        if self.return_type is Probability:
            return repr(self.return_type) + "(wires={})".format(self.wires.tolist())

        return repr(self.return_type) + "(" + s + ")"

    @property
    def name(self):
        """All constituent observable names making up the tensor product.

        Returns:
            list[str]: list containing all observable names
        """
        return [o.name for o in self.obs]

    @property
    def num_wires(self):
        """Number of wires the tensor product acts on.

        Returns:
            int: number of wires
        """
        return len(self.wires)

    @property
    def wires(self):
        """All wires in the system the tensor product acts on.

        Returns:
            Wires: wires addressed by the observables in the tensor product
        """
        return Wires.all_wires([o.wires for o in self.obs])

    @property
    def data(self):
        """Raw parameters of all constituent observables in the tensor product.

        Returns:
            list[Any]: flattened list containing all dependent parameters
        """
        return [p for sublist in [o.data for o in self.obs] for p in sublist]

    @property
    def num_params(self):
        """Number of parameters of all constituent observables in the
        tensor product.

        Returns:
            int: number of parameters
        """
        return len(self.data)

    @property
    def parameters(self):
        """Evaluated parameter values of all constituent observables in the
        tensor product.

        Returns:
            list[list[Any]]: nested list containing the parameters per
            observable in the tensor product
        """
        return [o.parameters for o in self.obs]

    @property
    def non_identity_obs(self):
        """Returns the non-identity observables contained in the tensor product.

        Returns:
            list[:class:`~.Observable`]: list containing the non-identity
            observables in the tensor product
        """
        return [obs for obs in self.obs if not isinstance(obs, qml.Identity)]

    def __matmul__(self, other):
        if isinstance(other, Tensor):
            self.obs.extend(other.obs)
            return self

        if isinstance(other, Observable):
            self.obs.append(other)
            return self

        raise ValueError("Can only perform tensor products between observables.")

    def __rmatmul__(self, other):
        if isinstance(other, Observable):
            self.obs[:0] = [other]
            return self

        raise ValueError("Can only perform tensor products between observables.")

    __imatmul__ = __matmul__

    @property
    def eigvals(self):
        """Return the eigenvalues of the specified tensor product observable.

        This method uses pre-stored eigenvalues for standard observables
        where possible.

        Returns:
            array[float]: array containing the eigenvalues of the tensor
            product observable
        """
        if self._eigvals_cache is not None:
            return self._eigvals_cache

        standard_observables = {"PauliX", "PauliY", "PauliZ", "Hadamard"}

        # observable should be Z^{\otimes n}
        self._eigvals_cache = pauli_eigs(len(self.wires))

        # Sort observables lexicographically by the strings of the wire labels
        # TODO: check for edge cases of the sorting, e.g. Tensor(Hermitian(obs, wires=[0, 2]),
        # Hermitian(obs, wires=[1, 3, 4])
        # Sorting the observables based on wires, so that the order of
        # the eigenvalues is correct
        obs_sorted = sorted(self.obs, key=lambda x: [str(l) for l in x.wires.labels])

        # check if there are any non-standard observables (such as Identity)
        if set(self.name) - standard_observables:
            # Tensor product of observables contains a mixture
            # of standard and non-standard observables
            self._eigvals_cache = np.array([1])
            for k, g in itertools.groupby(obs_sorted, lambda x: x.name in standard_observables):
                if k:
                    # Subgroup g contains only standard observables.
                    self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
                else:
                    # Subgroup g contains only non-standard observables.
                    for ns_ob in g:
                        # loop through all non-standard observables
                        self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)

        return self._eigvals_cache

    def diagonalizing_gates(self):
        """Return the gate set that diagonalizes a circuit according to the
        specified tensor observable.

        This method uses pre-stored eigenvalues for standard observables where
        possible and stores the corresponding eigenvectors from the eigendecomposition.

        Returns:
            list: list containing the gates diagonalizing the tensor observable
        """
        diag_gates = []
        for o in self.obs:
            diag_gates.extend(o.diagonalizing_gates())

        return diag_gates

    @property
    def matrix(self):
        r"""Matrix representation of the tensor operator
        in the computational basis.

        **Example:**

        Note that the returned matrix *only includes explicitly
        declared observables* making up the tensor product;
        that is, it only returns the matrix for the specified
        subsystem it is defined for.

        >>> O = qml.PauliZ(0) @ qml.PauliZ(2)
        >>> O.matrix
        array([[ 1,  0,  0,  0],
               [ 0, -1,  0,  0],
               [ 0,  0, -1,  0],
               [ 0,  0,  0,  1]])

        To get the full :math:`2^3\times 2^3` Hermitian matrix
        acting on the 3-qubit system, the identity on wire 1
        must be explicitly included:

        >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
        >>> O.matrix
        array([[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
               [ 0., -1.,  0., -0.,  0., -0.,  0., -0.],
               [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
               [ 0., -0.,  0., -1.,  0., -0.,  0., -0.],
               [ 0.,  0.,  0.,  0., -1., -0., -0., -0.],
               [ 0., -0.,  0., -0., -0.,  1., -0.,  0.],
               [ 0.,  0.,  0.,  0., -0., -0., -1., -0.],
               [ 0., -0.,  0., -0., -0.,  0., -0.,  1.]])

        Returns:
            array: matrix representation
        """
        # group the observables based on what wires they act on
        U_list = []
        for _, g in itertools.groupby(self.obs, lambda x: x.wires.labels):
            # extract the matrices of each diagonalizing gate
            mats = [i.matrix for i in g]

            if len(mats) > 1:
                # multiply all unitaries together before appending
                mats = [multi_dot(mats)]

            # append diagonalizing unitary for specific wire to U_list
            U_list.append(mats[0])

        # Return the Hermitian matrix representing the observable
        # over the defined wires.
        return functools.reduce(np.kron, U_list)

    def prune(self):
        """Returns a pruned tensor product of observables by removing
        :class:`~.Identity` instances from the observables building up
        the :class:`~.Tensor`.

        The ``return_type`` attribute is preserved while pruning.

        If the tensor product only contains one observable, then this
        observable instance is returned.

        Note that, as a result, this method can return observables that are
        not a :class:`~.Tensor` instance.

        **Example:**

        Pruning that returns a :class:`~.Tensor`:

        >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
        >>> O.prune()
        <pennylane.operation.Tensor at 0x7fc1642d1590>
        >>> [(o.name, o.wires) for o in O.prune().obs]
        [('PauliZ', [0]), ('PauliZ', [2])]

        Pruning that returns a single observable:

        >>> O = qml.PauliZ(0) @ qml.Identity(1)
        >>> O_pruned = O.prune()
        >>> (O_pruned.name, O_pruned.wires)
        ('PauliZ', [0])

        Returns:
            ~.Observable: the pruned tensor product of observables
        """
        if len(self.non_identity_obs) == 0:
            # Return a single Identity as the tensor only contains Identities
            obs = qml.Identity(self.wires[0])
        elif len(self.non_identity_obs) == 1:
            obs = self.non_identity_obs[0]
        else:
            obs = Tensor(*self.non_identity_obs)

        obs.return_type = self.return_type
        return obs


# =============================================================================
# CV Operations and observables
# =============================================================================


class CV:
    """A mixin base class denoting a continuous-variable operation."""

    # pylint: disable=no-member

    def heisenberg_expand(self, U, wires):
        """Expand the given local Heisenberg-picture array into a full-system one.

        Args:
            U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)
            wires (Wires): wires on the device the array ``U`` should be expanded
                to apply to

        Raises:
            ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect

        Returns:
            array[float]: expanded array, dimension ``1+2*num_wires``
        """
        U_dim = len(U)
        nw = len(self.wires)

        if U.ndim > 2:
            raise ValueError("Only order-1 and order-2 arrays supported.")

        if U_dim != 1 + 2 * nw:
            raise ValueError("{}: Heisenberg matrix is the wrong size {}.".format(self.name, U_dim))

        if len(wires) == 0 or len(self.wires) == len(wires):
            # no expansion necessary (U is a full-system matrix in the correct order)
            return U

        if not wires.contains_wires(self.wires):
            raise ValueError(
                "{}: Some observable wires {} do not exist on this device with wires {}".format(
                    self.name, self.wires, wires
                )
            )

        # get the indices that the operation's wires have on the device
        wire_indices = wires.indices(self.wires)

        # expand U into the I, x_0, p_0, x_1, p_1, ... basis
        dim = 1 + len(wires) * 2

        def loc(w):
            "Returns the slice denoting the location of (x_w, p_w) in the basis."
            ind = 2 * w + 1
            return slice(ind, ind + 2)

        if U.ndim == 1:
            W = np.zeros(dim)
            W[0] = U[0]
            for k, w in enumerate(wire_indices):
                W[loc(w)] = U[loc(k)]
        elif U.ndim == 2:
            if isinstance(self, Observable):
                W = np.zeros((dim, dim))
            else:
                W = np.eye(dim)

            W[0, 0] = U[0, 0]

            for k1, w1 in enumerate(wire_indices):
                s1 = loc(k1)
                d1 = loc(w1)

                # first column
                W[d1, 0] = U[s1, 0]
                # first row (for gates, the first row is always (1, 0, 0, ...), but not for observables!)
                W[0, d1] = U[0, s1]

                for k2, w2 in enumerate(wire_indices):
                    W[d1, loc(w2)] = U[s1, loc(k2)]  # block k1, k2 in U goes to w1, w2 in W.
        return W

    @staticmethod
    def _heisenberg_rep(p):
        r"""Heisenberg picture representation of the operation.

        * For Gaussian CV gates, this method returns the matrix of the linear
          transformation carried out by the gate for the given parameter values.
          The method is not defined for non-Gaussian gates.

          **The existence of this method is equivalent to setting** ``grad_method = 'A'``.

        * For observables, returns a real vector (first-order observables) or
          symmetric matrix (second-order observables) of expansion coefficients
          of the observable.

        For single-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x, \p)`.
        For multi-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.

        .. note::

            For gates, we assume that the inverse transformation is obtained
            by negating the first parameter.

        Args:
            p (Sequence[float]): parameter values for the transformation

        Returns:
            array[float]: :math:`\tilde{U}` or :math:`q`
        """
        # pylint: disable=unused-argument
        return None

    @classproperty
    def supports_heisenberg(self):
        """Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`
        static method, thereby indicating that it is Gaussian and does not block the use
        of the parameter-shift differentiation method if found between the differentiated
        gate and an observable.
        """
        return CV._heisenberg_rep != self._heisenberg_rep


class CVOperation(CV, Operation):
    """Base class for continuous-variable quantum operations."""

    # pylint: disable=abstract-method

    @classproperty
    def supports_parameter_shift(self):
        """Returns True iff the CV Operation supports the parameter-shift
        differentiation method. This means that it has ``grad_method='A'``
        and has overridden the :meth:`~.CV._heisenberg_rep` static method.
        """
        return self.grad_method == "A" and self.supports_heisenberg

    def heisenberg_pd(self, idx):
        """Partial derivative of the Heisenberg picture transform matrix.

        Computed using grad_recipe.

        Args:
            idx (int): index of the parameter with respect to which the
                partial derivative is computed.
        Returns:
            array[float]: partial derivative
        """
        # get the gradient recipe for this parameter
        recipe = self.grad_recipe[idx]

        # Default values
        multiplier = 0.5
        a = 1
        shift = np.pi / 2

        # We set the default recipe as follows:
        # ∂f(x) = c*f(x+s) - c*f(x-s)
        default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]
        param_shift = default_param_shift if recipe is None else recipe

        pd = None  # partial derivative of the transformation

        p = self.parameters
        original_p_idx = p[idx]
        for c, _a, s in param_shift:
            # evaluate the transform at the shifted parameter values
            p[idx] = _a * original_p_idx + s
            U = self._heisenberg_rep(p)  # pylint: disable=assignment-from-none

            if pd is None:
                pd = c * U
            else:
                pd += c * U

        return pd

    def heisenberg_tr(self, wires, inverse=False):
        r"""Heisenberg picture representation of the linear transformation carried
        out by the gate at current parameter values.

        Given a unitary quantum gate :math:`U`, we may consider its linear
        transformation in the Heisenberg picture, :math:`U^\dagger(\cdot) U`.

        If the gate is Gaussian, this linear transformation preserves the
        polynomial order of any observables that are polynomials in
        :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
        This also means it maps :math:`\text{span}(\mathbf{r})` into itself:

        .. math:: U^\dagger \mathbf{r}_i U = \sum_j \tilde{U}_{ij} \mathbf{r}_j

        For Gaussian CV gates, this method returns the transformation matrix for
        the current parameter values of the Operation. The method is not defined
        for non-Gaussian (and non-CV) gates.

        Args:
            wires (Wires): wires on the device that the observable gets applied to
            inverse (bool): if True, return the inverse transformation instead

        Raises:
            RuntimeError: if the specified operation is not Gaussian or is
                missing the `_heisenberg_rep` method

        Returns:
            array[float]: :math:`\tilde{U}`, the Heisenberg picture representation
            of the linear transformation
        """
        p = self.parameters
        if inverse:
            if self.par_domain == "A":
                # TODO: expand this for the new par domain class, for non-unitary matrices.
                p[0] = np.linalg.inv(p[0])
            else:
                p[0] = -p[0]  # negate first parameter
        U = self._heisenberg_rep(p)  # pylint: disable=assignment-from-none

        # not defined?
        if U is None:
            raise RuntimeError(
                "{} is not a Gaussian operation, or is missing the _heisenberg_rep method.".format(
                    self.name
                )
            )

        return self.heisenberg_expand(U, wires)


class CVObservable(CV, Observable):
    r"""Base class for continuous-variable observables.

    The class attribute :attr:`~.ev_order` can be defined to indicate
    to PennyLane whether the corresponding CV observable is a polynomial in the
    quadrature operators. If so,

    * ``ev_order = 1`` indicates a first order polynomial in quadrature
      operators :math:`(\x, \p)`.

    * ``ev_order = 2`` indicates a second order polynomial in quadrature
      operators :math:`(\x, \p)`.

    If :attr:`~.ev_order` is not ``None``, then the Heisenberg representation
    of the observable should be defined in the static method :meth:`~.CV._heisenberg_rep`,
    returning an array of the correct dimension.
    """
    # pylint: disable=abstract-method
    ev_order = None  #: None, int: if not None, the observable is a polynomial of the given order in `(x, p)`.

    def heisenberg_obs(self, wires):
        r"""Representation of the observable in the position/momentum operator basis.

        Returns the expansion :math:`q` of the observable, :math:`Q`, in the
        basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.

        * For first-order observables returns a real vector such
          that :math:`Q = \sum_i q_i \mathbf{r}_i`.

        * For second-order observables returns a real symmetric matrix
          such that :math:`Q = \sum_{ij} q_{ij} \mathbf{r}_i \mathbf{r}_j`.

        Args:
            wires (Wires): wires on the device that the observable gets applied to
        Returns:
            array[float]: :math:`q`
        """
        p = self.parameters
        U = self._heisenberg_rep(p)  # pylint: disable=assignment-from-none
        return self.heisenberg_expand(U, wires)


def operation_derivative(operation) -> np.ndarray:
    r"""Calculate the derivative of an operation.

    For an operation :math:`e^{i \hat{H} \phi t}`, this function returns the matrix
    representation in the standard basis of its derivative with respect to :math:`t`, i.e.,

    .. math:: \frac{d \, e^{i \hat{H} \phi t}}{dt} = i \phi \hat{H} e^{i \hat{H} \phi t},

    where :math:`\phi` is a real constant.

    Args:
        operation (.Operation): The operation to be differentiated.

    Returns:
        array: the derivative of the operation as a matrix in the standard basis

    Raises:
        ValueError: if the operation does not have a generator or is not composed of a single
            trainable parameter
    """
    generator, prefactor = operation.generator

    if generator is None:
        raise ValueError(f"Operation {operation.name} does not have a generator")
    if operation.num_params != 1:
        # Note, this case should already be caught by the previous raise since we haven't worked
        # out how to have an operator for multiple parameters. It is added here in case of a
        # future change.
        raise ValueError(
            f"Operation {operation.name} is not written in terms of a single parameter"
        )

    if not isinstance(generator, np.ndarray):
        generator = generator.matrix

    if operation.inverse:
        prefactor *= -1
        generator = generator.conj().T

    return 1j * prefactor * generator @ operation.matrix
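
# --- Added illustration (not part of pennylane/operation.py) -----------------
# A minimal standalone sketch, using only NumPy/SciPy, of two facts documented
# above: (1) the amplitude-damping Kraus matrices from the docstring example
# satisfy the completeness relation sum_k K_k^dag K_k = I, and (2) the identity
# used by `operation_derivative`, d/dt expm(1j*phi*t*H) = 1j*phi*H @ U(t).
# The Hamiltonian H and all numbers below are arbitrary example choices.
import numpy as np
from scipy.linalg import expm

# (1) amplitude damping with gamma = 0.1 (0.9486833 = sqrt(0.9), 0.31622777 = sqrt(0.1))
gamma = 0.1
K0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - gamma)]])
K1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]])
assert np.allclose(K0.conj().T @ K0 + K1.conj().T @ K1, np.eye(2))

# (2) derivative of a single-parameter exponential, checked against a central difference
H = np.array([[0.0, 1.0], [1.0, 0.0]])  # Pauli-X as an example generator
phi, t, eps = 0.7, 0.3, 1e-5
U = lambda s: expm(1j * phi * s * H)
analytic = 1j * phi * H @ U(t)
numeric = (U(t + eps) - U(t - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-8)
# -----------------------------------------------------------------------------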
pennylane/operation.py
61,709
A mixin base class denoting a continuous-variable operation. Base class for continuous-variable observables. The class attribute :attr:`~.ev_order` can be defined to indicate to PennyLane whether the corresponding CV observable is a polynomial in the quadrature operators. If so, * ``ev_order = 1`` indicates a first order polynomial in quadrature operators :math:`(\x, \p)`. * ``ev_order = 2`` indicates a second order polynomial in quadrature operators :math:`(\x, \p)`. If :attr:`~.ev_order` is not ``None``, then the Heisenberg representation of the observable should be defined in the static method :meth:`~.CV._heisenberg_rep`, returning an array of the correct dimension. Base class for continuous-variable quantum operations. Base class for quantum channels. As with :class:`~.Operation`, the following class attributes must be defined for all channels: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` To define a noisy channel, the following attribute of :class:`~.Channel` can be used to list the corresponding Kraus matrices. * :attr:`~.Channel._kraus_matrices` The following two class attributes are optional, but in most cases should be clearly defined to avoid unexpected behavior during differentiation. * :attr:`~.Operation.grad_method` * :attr:`~.Operation.grad_recipe` Args: params (tuple[float, int, array, Variable]): operation parameters Keyword Args: wires (Sequence[int]): Subsystems the channel acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operation should be immediately pushed into a :class:`BaseQNode` circuit queue. This flag is useful if there is some reason to run an Operation outside of a BaseQNode context. Allows a class property to be defined Base class for diagonal quantum operations supported by a device. As with :class:`~.Operation`, the following class attributes must be defined for all operations: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` The following two class attributes are optional, but in most cases should be clearly defined to avoid unexpected behavior during differentiation. * :attr:`~.Operation.grad_method` * :attr:`~.Operation.grad_recipe` Finally, there are some additional optional class attributes that may be set, and used by certain quantum optimizers: * :attr:`~.Operation.generator` Args: params (tuple[float, int, array, Variable]): operation parameters Keyword Args: wires (Sequence[int]): Subsystems it acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operation should be immediately pushed into a :class:`BaseQNode` circuit queue. This flag is useful if there is some reason to run an Operation outside of a BaseQNode context. Base class for observables supported by a device. :class:`Observable` is used to describe Hermitian quantum observables. As with :class:`~.Operator`, the following class attributes must be defined for all observables: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` Args: params (tuple[float, int, array, Variable]): observable parameters Keyword Args: wires (Sequence[int]): subsystems it acts on. Currently, only one subsystem is supported. do_queue (bool): Indicates whether the operation should be immediately pushed into the Operator queue. Enumeration class to represent the return types of an observable. Base class for quantum operations supported by a device. 
As with :class:`~.Operator`, the following class attributes must be defined for all operations: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` The following two class attributes are optional, but in most cases should be clearly defined to avoid unexpected behavior during differentiation. * :attr:`~.Operation.grad_method` * :attr:`~.Operation.grad_recipe` Finally, there are some additional optional class attributes that may be set, and used by certain quantum optimizers: * :attr:`~.Operation.generator` Args: params (tuple[float, int, array, Variable]): operation parameters Keyword Args: wires (Sequence[int]): Subsystems it acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operation should be immediately pushed into a :class:`BaseQNode` circuit queue. This flag is useful if there is some reason to run an Operation outside of a BaseQNode context. Base class for quantum operators supported by a device. The following class attributes must be defined for all Operators: * :attr:`~.Operator.num_params` * :attr:`~.Operator.num_wires` * :attr:`~.Operator.par_domain` Args: params (tuple[float, int, array, Variable]): operator parameters Keyword Args: wires (Iterable[Number, str], Number, str, Wires): Wires that the operator acts on. If not given, args[-1] is interpreted as wires. do_queue (bool): Indicates whether the operator should be immediately pushed into the Operator queue. Container class representing tensor products of observables. To create a tensor, simply initiate it like so: >>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2])) You can also create a tensor from other Tensors: >>> T = Tensor(T, qml.PauliZ(4)) The ``@`` symbol can be used as a tensor product operation: >>> T = qml.PauliX(0) @ qml.Hadamard(2) Integer enumeration class to represent the number of wires an operation acts on The addition operation between Observables/Tensors/qml.Hamiltonian objects. The scalar multiplication operation between a scalar and an Observable/Tensor. String representation of the return types. Constructor-call-like representation. Constructor-call-like representation. Constructor-call-like representation. The subtraction operation between Observables/Tensors/qml.Hamiltonian objects. Eigenvalues of the operator. This is a *class method* that should be defined for all new operations and observables that returns the eigenvalues of the operator. Note that the eigenvalues are not guaranteed to be in any particular order. This private method allows eigenvalues to be computed directly without instantiating the operators first. The default implementation relies on the presence of the :attr:`_matrix` method. To return the eigenvalues of *instantiated* operators, please use the :attr:`~.Operator.eigvals` property instead. **Example:** >>> qml.RZ._eigvals(0.5) >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigenvalue representation Eigenvalues of the operator. The order of the eigenvalues needs to match the order of the computational basis vectors. This is a *class method* that must be defined for all new diagonal operations, that returns the eigenvalues of the operator in the computational basis. This private method allows eigenvalues to be computed directly without instantiating the operators first. To return the eigenvalues of *instantiated* operators, please use the :attr:`~.Operator.eigvals` property instead. 
**Example:** >>> qml.RZ._eigvals(0.5) >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigenvalue representation Eigenvalues of the observable. The order of the eigenvalues needs to match the order of the computational basis vectors when the observable is diagonalized using :attr:`diagonalizing_gates`. This is a *class method* that must be defined for all new diagonal operations, that returns the eigenvalues of the operator in the computational basis. This private method allows eigenvalues to be computed directly without instantiating the operators first. To return the eigenvalues of *instantiated* operators, please use the :attr:`~.Operator.eigvals` property instead. **Example:** >>> qml.PauliZ._eigvals() >>> array([1, -1]) Returns: array: eigenvalue representation Heisenberg picture representation of the operation. * For Gaussian CV gates, this method returns the matrix of the linear transformation carried out by the gate for the given parameter values. The method is not defined for non-Gaussian gates. **The existence of this method is equivalent to setting** ``grad_method = 'A'``. * For observables, returns a real vector (first-order observables) or symmetric matrix (second-order observables) of expansion coefficients of the observable. For single-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x, \p)`. For multi-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`. .. note:: For gates, we assume that the inverse transformation is obtained by negating the first parameter. Args: p (Sequence[float]): parameter values for the transformation Returns: array[float]: :math:`\tilde{U}` or :math:`q` Kraus matrices representing a quantum channel, specified in the computational basis. This is a class method that should be defined for all new channels. It returns the Kraus matrices representing the channel in the computational basis. This private method allows matrices to be computed directly without instantiating the channel first. **Example** >>> qml.AmplitudeDamping._kraus_matrices(0.1) >>> [array([[1. , 0. ], [0. , 0.9486833]]), array([[0. , 0.31622777], [0. , 0. ]])] To return the Kraus matrices of an *instantiated* channel, please use the :attr:`~.Operator.kraus_matrices` property instead. Returns: list(array): list of Kraus matrices Matrix representation of the operator in the computational basis. This is a *class method* that should be defined for all new operations and observables, that returns the matrix representing the operator in the computational basis. This private method allows matrices to be computed directly without instantiating the operators first. To return the matrices of *instantiated* operators, please use the :attr:`~.Operator.matrix` property instead. **Example:** >>> qml.RY._matrix(0.5) >>> array([[ 0.96891242+0.j, -0.24740396+0.j], [ 0.24740396+0.j, 0.96891242+0.j]]) Returns: array: matrix representation Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion. This allows for comparison between observables that are equivalent, but are expressed in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and `qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings. **Example** >>> tensor = qml.PauliX(0) @ qml.PauliZ(1) >>> print(tensor._obs_data()) {("PauliZ", <Wires = [1]>, ()), ("PauliX", <Wires = [0]>, ())} Get base name of the operator. Check the validity of a parameter. 
:class:`.Variable` instances can represent any real scalars (but not arrays). Args: p (Number, array, Variable): parameter to check flattened (bool): True means p is an element of a flattened parameter sequence (affects the handling of 'A' parameters) Raises: TypeError: parameter is not an element of the expected domain ValueError: parameter is an element of an unknown domain Returns: Number, array, Variable: p The class property decorator Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`, to determine if they are equivalent. Observables/Hamiltonians are equivalent if they represent the same operator (their matrix representations are equal), and they are defined on the same wires. .. Warning:: The compare method does **not** check if the matrix representation of a :class:`~.Hermitian` observable is equal to an equivalent observable expressed in terms of Pauli matrices. To do so would require the matrix form of Hamiltonians and Tensors be calculated, which would drastically increase runtime. Returns: (bool): True if equivalent. **Examples** >>> ob1 = qml.PauliX(0) @ qml.Identity(1) >>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)]) >>> ob1.compare(ob2) True >>> ob1 = qml.PauliX(0) >>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0) >>> ob1.compare(ob2) False Raw parameters of all constituent observables in the tensor product. Returns: list[Any]: flattened list containing all dependent parameters Returns a template decomposing the operation into other quantum operations. Returns the list of operations such that they diagonalize the observable in the computational basis. Returns: list(qml.Operation): A list of gates that diagonalize the observable in the computational basis. Return the gate set that diagonalizes a circuit according to the specified tensor observable. This method uses pre-stored eigenvalues for standard observables where possible and stores the corresponding eigenvectors from the eigendecomposition. Returns: list: list containing the gates diagonalizing the tensor observable Eigenvalues of an instantiated operator. Note that the eigenvalues are not guaranteed to be in any particular order. **Example:** >>> U = qml.RZ(0.5, wires=1) >>> U.eigvals >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigvals representation Eigenvalues of an instantiated diagonal operation. The order of the eigenvalues needs to match the order of the computational basis vectors. **Example:** >>> U = qml.RZ(0.5, wires=1) >>> U.eigvals >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j]) Returns: array: eigvals representation Eigenvalues of an instantiated observable. The order of the eigenvalues needs to match the order of the computational basis vectors when the observable is diagonalized using :attr:`diagonalizing_gates`. This is a requirement for using qubit observables in quantum functions. **Example:** >>> U = qml.PauliZ(wires=1) >>> U.eigvals >>> array([1, -1]) Returns: array: eigvals representation Return the eigenvalues of the specified tensor product observable. This method uses pre-stored eigenvalues for standard observables where possible. Returns: array[float]: array containing the eigenvalues of the tensor product observable Evaluate a single parameter. Generator of the operation. 
A length-2 list ``[generator, scaling_factor]``, where * ``generator`` is an existing PennyLane operation class or :math:`2\times 2` Hermitian array that acts as the generator of the current operation * ``scaling_factor`` represents a scaling factor applied to the generator operation For example, if :math:`U(\theta)=e^{i0.7\theta \sigma_x}`, then :math:`\sigma_x`, with scaling factor :math:`s`, is the generator of operator :math:`U(\theta)`: .. code-block:: python generator = [PauliX, 0.7] Default is ``[None, 1]``, indicating the operation has no generator. Multiplier and shift for the given parameter, based on its gradient recipe. Args: idx (int): parameter index Returns: float, float: multiplier, shift Gradient computation method. * ``'A'``: analytic differentiation using the parameter-shift method. * ``'F'``: finite difference numerical differentiation. * ``None``: the operation may not be differentiated. Default is ``'F'``, or ``None`` if the Operation has zero parameters. Expand the given local Heisenberg-picture array into a full-system one. Args: U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``) wires (Wires): wires on the device the array ``U`` should be expanded to apply to Raises: ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect Returns: array[float]: expanded array, dimension ``1+2*num_wires`` Representation of the observable in the position/momentum operator basis. Returns the expansion :math:`q` of the observable, :math:`Q`, in the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`. * For first-order observables returns a real vector such that :math:`Q = \sum_i q_i \mathbf{r}_i`. * For second-order observables returns a real symmetric matrix such that :math:`Q = \sum_{ij} q_{ij} \mathbf{r}_i \mathbf{r}_j`. Args: wires (Wires): wires on the device that the observable gets applied to Returns: array[float]: :math:`q` Partial derivative of the Heisenberg picture transform matrix. Computed using grad_recipe. Args: idx (int): index of the parameter with respect to which the partial derivative is computed. Returns: array[float]: partial derivative Heisenberg picture representation of the linear transformation carried out by the gate at current parameter values. Given a unitary quantum gate :math:`U`, we may consider its linear transformation in the Heisenberg picture, :math:`U^\dagger(\cdot) U`. If the gate is Gaussian, this linear transformation preserves the polynomial order of any observables that are polynomials in :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`. This also means it maps :math:`\text{span}(\mathbf{r})` into itself: .. math:: U^\dagger \mathbf{r}_i U = \sum_j \tilde{U}_{ij} \mathbf{r}_j For Gaussian CV gates, this method returns the transformation matrix for the current parameter values of the Operation. The method is not defined for non-Gaussian (and non-CV) gates. Args: wires (Wires): wires on the device that the observable gets applied to inverse (bool): if True, return the inverse transformation instead Raises: RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method Returns: array[float]: :math:`\tilde{U}`, the Heisenberg picture representation of the linear transformation Inverts the operation, such that the inverse will be used for the computations by the specific device. This method concatenates a string to the name of the operation, to indicate that the inverse will be used for computations. 
Any subsequent call of this method will toggle between the original operation and the inverse of the operation. Returns: :class:`Operator`: operation to be inverted Boolean determining if the inverse of the operation was requested. Kraus matrices of an instantiated channel in the computational basis. ** Example** >>> U = qml.AmplitudeDamping(0.1, wires=1) >>> U.kraus_matrices >>> [array([[1. , 0. ], [0. , 0.9486833]]), array([[0. , 0.31622777], [0. , 0. ]])] Returns: list(array): list of Kraus matrices Returns the slice denoting the location of (x_w, p_w) in the basis. Matrix representation of an instantiated operator in the computational basis. **Example:** >>> U = qml.RY(0.5, wires=1) >>> U.matrix >>> array([[ 0.96891242+0.j, -0.24740396+0.j], [ 0.24740396+0.j, 0.96891242+0.j]]) Returns: array: matrix representation Matrix representation of the tensor operator in the computational basis. **Example:** Note that the returned matrix *only includes explicitly declared observables* making up the tensor product; that is, it only returns the matrix for the specified subsystem it is defined for. >>> O = qml.PauliZ(0) @ qml.PauliZ(2) >>> O.matrix array([[ 1, 0, 0, 0], [ 0, -1, 0, 0], [ 0, 0, -1, 0], [ 0, 0, 0, 1]]) To get the full :math:`2^3\times 2^3` Hermitian matrix acting on the 3-qubit system, the identity on wire 1 must be explicitly included: >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2) >>> O.matrix array([[ 1., 0., 0., 0., 0., 0., 0., 0.], [ 0., -1., 0., -0., 0., -0., 0., -0.], [ 0., 0., 1., 0., 0., 0., 0., 0.], [ 0., -0., 0., -1., 0., -0., 0., -0.], [ 0., 0., 0., 0., -1., -0., -0., -0.], [ 0., -0., 0., -0., -0., 1., -0., 0.], [ 0., 0., 0., 0., -0., -0., -1., -0.], [ 0., -0., 0., -0., -0., 0., -0., 1.]]) Returns: array: matrix representation String for the name of the operator. Get and set the name of the operator. All constituent observable names making up the tensor product. Returns: list[str]: list containing all observable names Returns the non-identity observables contained in the tensor product. Returns: list[:class:`~.Observable`]: list containing the non-identity observables in the tensor product Number of parameters the operator takes. Raw parameters of all constituent observables in the tensor product. Returns: list[Any]: flattened list containing all dependent parameters Number of wires the operator acts on. Number of wires the tensor product acts on. Returns: int: number of wires Calculate the derivative of an operation. For an operation :math:`e^{i \hat{H} \phi t}`, this function returns the matrix representation in the standard basis of its derivative with respect to :math:`t`, i.e., .. math:: \frac{d \, e^{i \hat{H} \phi t}}{dt} = i \phi \hat{H} e^{i \hat{H} \phi t}, where :math:`\phi` is a real constant. Args: operation (.Operation): The operation to be differentiated. Returns: array: the derivative of the operation as a matrix in the standard basis Raises: ValueError: if the operation does not have a generator or is not composed of a single trainable parameter Domain of the gate parameters. * ``'N'``: natural numbers (including zero). * ``'R'``: floats. * ``'A'``: arrays of real or complex values. * ``'L'``: list of arrays of real or complex values. * ``None``: if there are no parameters. Current parameter values. Fixed parameters are returned as is, free parameters represented by :class:`.Variable` instances are replaced by their current numerical value. 
Returns: list[Any]: parameter values Evaluated parameter values of all constituent observables in the tensor product. Returns: list[list[Any]]: nested list containing the parameters per observable in the tensor product Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from the observables building up the :class:`~.Tensor`. The ``return_type`` attribute is preserved while pruning. If the tensor product only contains one observable, then this observable instance is returned. Note that, as a result, this method can return observables that are not a :class:`~.Tensor` instance. **Example:** Pruning that returns a :class:`~.Tensor`: >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2) >>> O.prune() <pennylane.operation.Tensor at 0x7fc1642d1590 >>> [(o.name, o.wires) for o in O.prune().obs] [('PauliZ', [0]), ('PauliZ', [2])] Pruning that returns a single observable: >>> O = qml.PauliZ(0) @ qml.Identity(1) >>> O_pruned = O.prune() >>> (O_pruned.name, O_pruned.wires) ('PauliZ', [0]) Returns: ~.Observable: the pruned tensor product of observables Append the operator to the Operator queue. Set the function as a class method, and store as an attribute. Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep` static method, thereby indicating that it is Gaussian and does not block the use of the parameter-shift differentiation method if found between the differentiated gate and an observable. Returns True iff the CV Operation supports the parameter-shift differentiation method. This means that it has ``grad_method='A'`` and has overridden the :meth:`~.CV._heisenberg_rep` static method. Wires of this operator. Returns: Wires: wires All wires in the system the tensor product acts on. Returns: Wires: wires addressed by the observables in the tensor product This module contains the abstract base classes for defining PennyLane operations and observables. Description ----------- Qubit Operations ~~~~~~~~~~~~~~~~ The :class:`Operator` class serves as a base class for operators, and is inherited by both the :class:`Observable` class and the :class:`Operation` class. These classes are subclassed to implement quantum operations and measure observables in PennyLane. * Each :class:`~.Operator` subclass represents a general type of map between physical states. Each instance of these subclasses represents either - an application of the operator or - an instruction to measure and return the respective result. Operators act on a sequence of wires (subsystems) using given parameter values. * Each :class:`~.Operation` subclass represents a type of quantum operation, for example a unitary quantum gate. Each instance of these subclasses represents an application of the operation with given parameter values to a given sequence of wires (subsystems). * Each :class:`~.Observable` subclass represents a type of physical observable. Each instance of these subclasses represents an instruction to measure and return the respective result for the given parameter values on a sequence of wires (subsystems). Differentiation ^^^^^^^^^^^^^^^ In general, an :class:`Operation` is differentiable (at least using the finite-difference method) with respect to a parameter iff * the domain of that parameter is continuous. For an :class:`Operation` to be differentiable with respect to a parameter using the analytic method of differentiation, it must satisfy an additional constraint: * the parameter domain must be real. .. 
note:: These conditions are *not* sufficient for analytic differentiation. For example, CV gates must also define a matrix representing their Heisenberg linear transformation on the quadrature operators. For gates that *are* supported via the analytic method, the gradient recipe works as follows: .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k+s_i). where :math:`f` is the expectation value of an observable on a circuit that has been evolved by the operation being considered with parameter :math:`\phi_k`, there are multiple terms indexed with :math:`i` for each parameter :math:`\phi` and the :math:`[c_i, a_i, s_i]` are coefficients specific to the gate. The following specific case holds for example for qubit operations that are generated by one of the Pauli matrices and results in an overall positive and negative shift: .. math:: \frac{\partial}{\partial\phi_k}f = \frac{1}{2}\left[f \left( \phi_k+\frac{\pi}{2} \right) - f \left( \phi_k-\frac{\pi}{2} \right)\right], i.e., so that :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]`. CV Operation base classes ~~~~~~~~~~~~~~~~~~~~~~~~~ Due to additional requirements, continuous-variable (CV) operations must subclass the :class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation` and :class:`~.Observable`. Differentiation ^^^^^^^^^^^^^^^ To enable gradient computation using the analytic method for Gaussian CV operations, in addition, you need to provide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of the operation given its list of parameters, namely: * For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the operation on the vector of quadrature operators :math:`\mathbf{r}` for the given parameter values. * For Gaussian CV Observables this method should return a real vector (first-order observables) or symmetric matrix (second-order observables) of coefficients of the quadrature operators :math:`\x` and :math:`\p`. PennyLane uses the convention :math:`\mathbf{r} = (\I, \x, \p)` for single-mode operations and observables and :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)` for multi-mode operations and observables. .. note:: Non-Gaussian CV operations and observables are currently only supported via the finite-difference method of gradient computation. Copyright 2018-2020 Xanadu Quantum Technologies Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
pylint: disable=protected-access ============================================================================= Wire types ============================================================================= ============================================================================= ObservableReturnTypes types ============================================================================= ============================================================================= Class property ============================================================================= pragma: no cover pylint: disable=too-few-public-methods ============================================================================= Base Operator class =============================================================================: bool: flag: should we perform a domain check for the parameters? The memo dict maps object ID to object, and is required by the deepcopy function to keep track of objects it has already deep copied. Shallow copy the list of parameters. We avoid a deep copy here, since PyTorch does not support deep copying of tensors within a differentiable computation. Deep copy everything else. pylint: disable=too-many-branches: str: name of the operator: int, None: index of the Operator in the circuit queue, or None if not in a queue: Wires: wires on which the operator acts check that the number of wires given corresponds to required number check the validity of the params: list[Any]: parameters of the operator FIXME using self.parameters here instead of self.data is dangerous, it assumes the data can be evaluated which is only true if something suitable happens to remain in VariableRef.positional_arg_values etc. after the last evaluation. pylint: disable=too-many-branches If parameter is a NumPy scalar, convert it into a Python scalar. p is not a Variable TODO profiling object arrays may have Variables inside them p is assumed to be a list of numpy arrays object arrays may have Variables inside them so pre-constructed Observable instances can be queued and returned in a single statement ============================================================================= Base Operation class ============================================================================= pylint: disable=abstract-method get the gradient recipe for this parameter Default values We set the default recipe following: ∂f(x) = c*f(x+s) - c*f(x-s) where we express a positive and a negative shift by default Parameter is a variable, we are in non-tape mode Need to use the internal multiplier in the Variable to update the multiplier and the shift Update the multiplier Update the shift zero multiplier means the shift is unimportant check the grad_method validity check the grad_recipe validity default recipe for every parameter pylint: disable=abstract-method pylint: disable=abstract-method ============================================================================= Base Observable class ============================================================================= pylint: disable=abstract-method extract the arguments pylint: disable=abstract-method pylint: disable=super-init-not-called observable should be Z^{\otimes n} Sort observables lexicographically by the strings of the wire labels TODO: check for edge cases of the sorting, e.g. 
Tensor(Hermitian(obs, wires=[0, 2]), Hermitian(obs, wires=[1, 3, 4]) Sorting the observables based on wires, so that the order of the eigenvalues is correct check if there are any non-standard observables (such as Identity) Tensor product of observables contains a mixture of standard and non-standard observables Subgroup g contains only standard observables. Subgroup g contains only non-standard observables. loop through all non-standard observables group the observables based on what wires they act on extract the matrices of each diagonalizing gate multiply all unitaries together before appending append diagonalizing unitary for specific wire to U_list Return the Hermitian matrix representing the observable over the defined wires. Return a single Identity as the tensor only contains Identities ============================================================================= CV Operations and observables ============================================================================= pylint: disable=no-member no expansion necessary (U is a full-system matrix in the correct order) get the indices that the operation's wires have on the device expand U into the I, x_0, p_0, x_1, p_1, ... basis first column first row (for gates, the first row is always (1, 0, 0, ...), but not for observables!) block k1, k2 in U goes to w1, w2 in W. pylint: disable=unused-argument pylint: disable=abstract-method get the gradient recipe for this parameter Default values We set the default recipe to as follows: ∂f(x) = c*f(x+s) - c*f(x-s) partial derivative of the transformation evaluate the transform at the shifted parameter values pylint: disable=assignment-from-none TODO: expand this for the new par domain class, for non-unitary matrices. negate first parameter pylint: disable=assignment-from-none not defined? pylint: disable=abstract-method: None, int: if not None, the observable is a polynomial of the given order in `(x, p)`. pylint: disable=assignment-from-none Note, this case should already be caught by the previous raise since we haven't worked out how to have an operator for multiple parameters. It is added here in case of a future change
33,729
en
0.708858
from __future__ import absolute_import, print_function

from django.conf import settings

CLIENT_ID = getattr(settings, "GITHUB_APP_ID", None)
CLIENT_SECRET = getattr(settings, "GITHUB_API_SECRET", None)

REQUIRE_VERIFIED_EMAIL = getattr(settings, "GITHUB_REQUIRE_VERIFIED_EMAIL", False)

ERR_NO_ORG_ACCESS = "You do not have access to the required GitHub organization."

ERR_NO_PRIMARY_EMAIL = (
    "We were unable to find a primary email address associated with your GitHub account."
)

ERR_NO_SINGLE_PRIMARY_EMAIL = (
    "We were unable to find a single primary email address associated with your GitHub account."
)

ERR_NO_VERIFIED_PRIMARY_EMAIL = (
    "We were unable to find a verified, primary email address associated with your GitHub account."
)

ERR_NO_SINGLE_VERIFIED_PRIMARY_EMAIL = "We were unable to find a single verified, primary email address associated with your GitHub account."

# we request repo as we share scopes with the other GitHub integration
SCOPE = "user:email,read:org,repo"

# deprecated please use GITHUB_API_DOMAIN and GITHUB_BASE_DOMAIN
DOMAIN = getattr(settings, "GITHUB_DOMAIN", "api.github.com")

BASE_DOMAIN = getattr(settings, "GITHUB_BASE_DOMAIN", "github.com")
API_DOMAIN = getattr(settings, "GITHUB_API_DOMAIN", DOMAIN)

ACCESS_TOKEN_URL = "https://{0}/login/oauth/access_token".format(BASE_DOMAIN)
AUTHORIZE_URL = "https://{0}/login/oauth/authorize".format(BASE_DOMAIN)
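
# --- Added illustration (not part of this module) ----------------------------
# A hedged sketch of how the endpoints above are typically consumed in GitHub's
# OAuth web flow: the login redirect is AUTHORIZE_URL plus a query string. The
# helper name and the redirect URI below are made up for illustration; the
# query parameter names follow GitHub's documented OAuth flow.
from urllib.parse import urlencode

def build_authorize_url(state, redirect_uri="https://sentry.example.com/auth/sso/"):
    query = urlencode({
        "client_id": CLIENT_ID,
        "redirect_uri": redirect_uri,
        "scope": SCOPE,
        "state": state,
    })
    return "{0}?{1}".format(AUTHORIZE_URL, query)
# -----------------------------------------------------------------------------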
src/sentry/auth/providers/github/constants.py
1,417
we request repo as we share scopes with the other GitHub integration deprecated please use GITHUB_API_DOMAIN and GITHUB_BASE_DOMAIN
131
en
0.730895
from OpenGL.GL import *
from ..GLGraphicsItem import GLGraphicsItem
from ..MeshData import MeshData
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from .. import shaders
import numpy as np

__all__ = ['GLMeshItem']

class GLMeshItem(GLGraphicsItem):
    """
    **Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`

    Displays a 3D triangle mesh.
    """
    def __init__(self, faces, vertexes=None):
        """
        See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments.
        """
        if isinstance(faces, MeshData):
            self.data = faces
        else:
            self.data = MeshData()
            self.data.setFaces(faces, vertexes)
        GLGraphicsItem.__init__(self)

    def initializeGL(self):
        self.shader = shaders.getShader('balloon')

        # compile the triangle geometry into a display list so that paint()
        # can replay it with a single glCallList
        l = glGenLists(1)
        self.triList = l
        glNewList(l, GL_COMPILE)

        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)
        glEnable(GL_ALPHA_TEST)
        #glAlphaFunc(GL_ALWAYS, 0.5)
        glEnable(GL_POINT_SMOOTH)
        glDisable(GL_DEPTH_TEST)
        glColor4f(1, 1, 1, .1)
        glBegin(GL_TRIANGLES)
        for face in self.data:
            for (pos, norm, color) in face:
                glColor4f(*color)
                glNormal3f(norm.x(), norm.y(), norm.z())
                glVertex3f(pos.x(), pos.y(), pos.z())
        glEnd()
        glEndList()

        # wireframe rendering path, currently disabled:
        #l = glGenLists(1)
        #self.meshList = l
        #glNewList(l, GL_COMPILE)
        #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        #glEnable(GL_BLEND)
        #glEnable(GL_ALPHA_TEST)
        ##glAlphaFunc(GL_ALWAYS, 0.5)
        #glEnable(GL_POINT_SMOOTH)
        #glEnable(GL_DEPTH_TEST)
        #glColor4f(1, 1, 1, .3)
        #glBegin(GL_LINES)
        #for f in self.faces:
            #for i in [0,1,2]:
                #j = (i+1) % 3
                #glVertex3f(*f[i])
                #glVertex3f(*f[j])
        #glEnd()
        #glEndList()

    def paint(self):
        shaders.glUseProgram(self.shader)
        glCallList(self.triList)
        shaders.glUseProgram(0)
        #glCallList(self.meshList)
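
# --- Added illustration (not part of this module) ----------------------------
# A hedged usage sketch: render a single triangle with GLMeshItem inside a
# GLViewWidget. This assumes a working Qt/OpenGL environment, and the exact
# MeshData argument format is an assumption here (triangles given directly as
# vertex triples); the coordinates are arbitrary example data.
if __name__ == '__main__':
    import pyqtgraph.opengl as gl

    app = QtGui.QApplication([])
    view = gl.GLViewWidget()
    view.show()

    faces = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]]], dtype=float)
    mesh = GLMeshItem(faces)  # forwarded to MeshData.setFaces internally
    view.addItem(mesh)
    app.exec_()
# -----------------------------------------------------------------------------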
pyqtgraph/opengl/items/GLMeshItem.py
2,274
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>` Displays a 3D triangle mesh. See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments. glAlphaFunc( GL_ALWAYS,0.5 )l = glGenLists(1)self.meshList = lglNewList(l, GL_COMPILE)glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)glEnable( GL_BLEND )glEnable( GL_ALPHA_TEST )glAlphaFunc( GL_ALWAYS,0.5 )glEnable( GL_POINT_SMOOTH )glEnable( GL_DEPTH_TEST )glColor4f(1, 1, 1, .3)glBegin( GL_LINES )for f in self.faces:for i in [0,1,2]:j = (i+1) % 3glVertex3f(*f[i])glVertex3f(*f[j])glEnd()glEndList()glCallList(self.meshList)
609
en
0.360596
import os, sys

try:
    import MacOS
except:
    MacOS = None

from pygame.pkgdata import getResource
from pygame import sdlmain_osx

__all__ = ['Video_AutoInit']

def Video_AutoInit():
    """This is a function that's called from the c extension code
    just before the display module is initialized"""
    if MacOS and not MacOS.WMAvailable():
        if not sdlmain_osx.WMEnable():
            raise ImportError("Can not access the window manager. Use py2app or execute with the pythonw script.")
    if not sdlmain_osx.RunningFromBundleWithNSApplication():
        try:
            default_icon_data = getResource('pygame_icon.tiff').read()
        except IOError:
            default_icon_data = None
        except NotImplementedError:
            default_icon_data = None
        sdlmain_osx.InstallNSApplication(default_icon_data)
    if (os.getcwd() == '/') and len(sys.argv) > 1:
        # app bundles start with cwd '/'; switch to the script's directory
        os.chdir(os.path.dirname(sys.argv[0]))
    return True
venv/Lib/site-packages/pygame/macosx.py
961
This is a function that's called from the c extension code just before the display module is initialized
104
en
0.938029
import sys from pathlib import Path, PurePath sys.path.append("./models/research/object_detection/") sys.path.append("./models/research/") import os import cv2 import numpy as np import tensorflow as tf from utils import label_map_util from utils import visualization_utils as vis_util from image_to_video_converter import images_to_video from PIL import Image class detector: def __init__(self, model_directory): model_path = os.path.join(model_directory, 'frozen_inference_graph.pb') labelmap_path = os.path.join(model_directory, 'labelmap.pbtxt') self.num_classes = 5 self.label_map = label_map_util.load_labelmap(labelmap_path) self.categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=self.num_classes, use_display_name=True) self.category_index = label_map_util.create_category_index(self.categories) self.detection_graph = tf.Graph() with self.detection_graph.as_default(): self.od_graph_def = tf.GraphDef() with tf.gfile.GFile(model_path, 'rb') as fid: self.serialized_graph = fid.read() self.od_graph_def.ParseFromString(self.serialized_graph) tf.import_graph_def(self.od_graph_def, name='') self.sess = tf.Session(graph=self.detection_graph) # Define input and output tensors (i.e. data) for the object detection classifier self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0') self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0') self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0') self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0') self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0') def draw_boxes_for_image(self, frame, min_score_threshold): frame_expanded = np.expand_dims(frame, axis=0) (boxes, scores, classes, num) = self.sess.run( [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections], feed_dict={self.image_tensor: frame_expanded}) vis_util.visualize_boxes_and_labels_on_image_array( frame, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores), self.category_index, use_normalized_coordinates=True, line_thickness=2, min_score_thresh=min_score_threshold) """ print("Self cateogry index") print(self.category_index) print("Score/Classes") for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores),np.squeeze(classes).astype(np.int32)): print(score, cls, self.category_index[cls]) """ good_boxes = [box for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32)) if score >= min_score_threshold and 'traffic' not in self.category_index[cls]['name']] return frame, good_boxes @staticmethod def denormalize(box, width, height): # Order taken from: https://www.tensorflow.org/api_docs/python/tf/image/draw_bounding_boxes y_min, x_min, y_max, x_max = box[0], box[1], box[2], box[3] x_min *= width x_max *= width y_min *= height y_max *= height return [x_min, x_max, y_min, y_max] @staticmethod def log_boxes(frame_number, boxes, ofile, width, height): for box in boxes: box = detector.denormalize(box, width, height) # Cast float coordinates to integers box = map(int, box) box = [frame_number] + list(box) line = "|".join(map(str, box)) print(line, file=ofile) def process_image(self, video_name, frame_number, image_path, min_score_threshold, output_path, save_images): image = cv2.imread(image_path) image_name = Path(image_path).stem result_frame = None # Set up logging file log_name = os.path.join(output_path, 
f"{video_name}_log.txt") with open(log_name, 'a') as log_file: print("At Frame:", frame_number) frame = np.array(image) # Draw boxes frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold) height, width, layers = frame.shape # Log boxes detector.log_boxes(frame_number, boxes, log_file, width, height) # Save frame with boxes for output result_frame = frame # Save frame with boxes if save_images: frame_path = os.path.join(output_path, f"{video_name}_frame_{image_name}.png") print("Saving image at", frame_path) vis_util.save_image_array_as_png(frame, frame_path) return result_frame def process_image_folder(self, folder_path, min_score_threshold, output_path, save_images): folder_name = Path(folder_path).stem frames = [] file_names = os.listdir(folder_path) file_names.sort() for f in file_names: image_path = os.path.join(folder_path, f) if os.path.isfile(image_path): frame_number = len(frames) next_frame = self.process_image(folder_name, frame_number, image_path, min_score_threshold, output_path, save_images) frames.append(next_frame) if save_images: video_path = os.path.join(output_path, folder_name) video = cv2.VideoCapture(video_path) print("Saving video at", video_path) images_to_video(frames, video_path, 30) def process_video(self, video_path, min_score_threshold, output_path, save_images): video_name = Path(video_path).stem # Open video file video = cv2.VideoCapture(video_path) # Set up logging file log_name = os.path.join(output_path, f"{video_name}_log.txt") with open(log_name, 'a') as log_file: frames = [] while(video.isOpened()): ret, frame = video.read() if not ret: break frame_number = len(frames) print("At Frame:", frame_number) # Draw boxes frame, boxes = self.draw_boxes_for_image(frame, min_score_threshold) height, width, layers = frame.shape # Log boxes detector.log_boxes(frame_number, boxes, log_file, width, height) # Save frame with boxes if save_images: frame_path = os.path.join(output_path, f"{video_name}_frame_{frame_number}.png") print("Saving image at", frame_path) vis_util.save_image_array_as_png(frame, frame_path) frames.append(frame) # Save as video if save_images: out_video_path = os.path.join(output_path, f"{video_name}.avi") print("Saving video at", out_video_path) images_to_video(frames, out_video_path, 30) # Clean up video.release() cv2.destroyAllWindows() def default_detector(): det = detector("./trained_model/detectors/") return det def default_inference(): det = default_detector() det.process_video("./data/SignaledJunctionRightTurn_1.avi", 0.70, "./output/temp/", False) return det if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--model_path', help='Path to the frozen inference graph and labelmap files', required=True) parser.add_argument('--video_path', help='Path to the video', required=True) parser.add_argument('--min_threshold', type=float, help='Minimum score threshold for a bounding box to be drawn', default=0.7) parser.add_argument('--output_path', help='Path for storing output images and/or logs', required=True) parser.add_argument('--save_images', action='store_true') args = parser.parse_args() det = detector(args.model_path) det.process_video(args.video_path, args.min_threshold, args.output_path, args.save_images)
detection.py
8,876
Define input and output tensors (i.e. data) for the object detection classifier Order taken from: https://www.tensorflow.org/api_docs/python/tf/image/draw_bounding_boxes Cast float coordinates to integers Set up logging file Draw boxes Log boxes Save frame with boxes for output Save frame with boxes Open video file Set up logging file Draw boxes Log boxes Save frame with boxes Save as video Clean up
402
en
0.678785
#!/usr/bin/env python # # mri_convert_ppc64 ds ChRIS plugin app # # (c) 2016-2019 Fetal-Neonatal Neuroimaging & Developmental Science Center # Boston Children's Hospital # # http://childrenshospital.org/FNNDSC/ # dev@babyMRI.org # import os import sys sys.path.append(os.path.dirname(__file__)) # import the Chris app superclass from chrisapp.base import ChrisApp Gstr_title = """ _ _ ____ ___ (_) | | / ___| / | _ __ ___ _ __ _ ___ ___ _ ____ _____ _ __| |_ _ __ _ __ ___/ /___ / /| | | '_ ` _ \| '__| | / __/ _ \| '_ \ \ / / _ \ '__| __| | '_ \| '_ \ / __| ___ \/ /_| | | | | | | | | | || (_| (_) | | | \ V / __/ | | |_ | |_) | |_) | (__| \_/ |\___ | |_| |_| |_|_| |_| \___\___/|_| |_|\_/ \___|_| \__| | .__/| .__/ \___\_____/ |_/ ______ ______| | | | |______| |______|_| |_| """ Gstr_synopsis = """ NAME mri_convert_ppc64.py SYNOPSIS python mri_convert_ppc64.py \\ [-h] [--help] \\ [--json] \\ [--man] \\ [--meta] \\ [--savejson <DIR>] \\ [-v <level>] [--verbosity <level>] \\ [--version] \\ [--inputFile <inputFile>] \\ [--outputFile <outputFile>] \\ [--executable <executable>] \\ [--execArgs <execArgs>] \\ <inputDir> \\ <outputDir> BRIEF EXAMPLE * Bare bones execution mkdir in out && chmod 777 out python mri_convert_ppc64.py \\ in out DESCRIPTION `mri_convert_ppc64.py` calls an underlying executable (typically 'mri_convert') and passes it an input and output spec. ARGS [--inputFile <inputFile>] The input file, relative to <inputDir>. [--outputFile <outputFile>] The output file, relative to <outpufDir>. [--executable <executable>] The actual executable to run. [--execArgs <execArgs>] Additional executable-specific command line args. [-h] [--help] If specified, show help message and exit. [--json] If specified, show json representation of app and exit. [--man] If specified, print (this) man page and exit. [--meta] If specified, print plugin meta data and exit. [--savejson <DIR>] If specified, save json representation file to DIR and exit. [-v <level>] [--verbosity <level>] Verbosity level for app. Not used currently. [--version] If specified, print version number and exit. """ class Mri_convert_ppc64(ChrisApp): """ This calls a pre-built PPC64 'mri_convert' that is housed in a base container.. """ AUTHORS = 'BU-2019-Power9 (dev@babyMRI.org)' SELFPATH = os.path.dirname(os.path.abspath(__file__)) SELFEXEC = os.path.basename(__file__) EXECSHELL = 'python3' TITLE = 'A PowerPPC plugin to run the FreeSurfer mri_convert' CATEGORY = '' TYPE = 'ds' DESCRIPTION = 'This calls a pre-built PPC64 mri_convert that is housed in a base container.' DOCUMENTATION = 'http://wiki' VERSION = '0.1' ICON = '' # url of an icon image LICENSE = 'Opensource (MIT)' MAX_NUMBER_OF_WORKERS = 1 # Override with integer value MIN_NUMBER_OF_WORKERS = 1 # Override with integer value MAX_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m' MIN_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m' MAX_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi' MIN_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi' MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs, as an integer, for your plugin MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs, as an integer, for your plugin # Use this dictionary structure to provide key-value output descriptive information # that may be useful for the next downstream plugin. 
For example: # # { # "finalOutputFile": "final/file.out", # "viewer": "genericTextViewer", # } # # The above dictionary is saved when plugin is called with a ``--saveoutputmeta`` # flag. Note also that all file paths are relative to the system specified # output directory. OUTPUT_META_DICT = {} def define_parameters(self): """ Define the CLI arguments accepted by this plugin app. Use self.add_argument to specify a new app argument. """ self.add_argument('--executable', dest = 'executable', type = str, optional = True, help = 'the conversion program to use', default = '/usr/bin/mri_convert') self.add_argument('--inputFile', dest = 'inputFile', type = str, optional = True, help = 'the input file', default = '') self.add_argument('--outputFile', dest = 'outputFile', type = str, optional = True, help = 'the output file', default = '') self.add_argument('--execArgs', dest = 'execArgs', type = str, optional = True, help = 'additonal arguments for the chosen executable', default = '') def run(self, options): """ Define the code to be run by this plugin app. """ if not len(options.inputFile): print("ERROR: No input file has been specified!") print("You must specify an input file relative to the input directory.") sys.exit(1) if not len(options.outputFile): print("ERROR: No output file has been specified!") print("You must specicy an output file relative to the output directory.") sys.exit(1) str_cmd = '%s %s %s/%s %s/%s' % ( options.executable, options.execArgs, options.inputdir, options.inputFile, options.outputdir, options.outputFile) os.system(str_cmd) def show_man_page(self): """ Print the app's man page. """ print(Gstr_title) print(Gstr_synopsis) # ENTRYPOINT if __name__ == "__main__": chris_app = Mri_convert_ppc64() chris_app.launch()
mri_convert_ppc64/mri_convert_ppc64.py
8,729
This calls a pre-built PPC64 'mri_convert' that is housed in a base container.. Define the CLI arguments accepted by this plugin app. Use self.add_argument to specify a new app argument. Define the code to be run by this plugin app. Print the app's man page. !/usr/bin/env python mri_convert_ppc64 ds ChRIS plugin app (c) 2016-2019 Fetal-Neonatal Neuroimaging & Developmental Science Center Boston Children's Hospital http://childrenshospital.org/FNNDSC/ dev@babyMRI.org import the Chris app superclass url of an icon image Override with integer value Override with integer value Override with millicore value as string, e.g. '2000m' Override with millicore value as string, e.g. '2000m' Override with string, e.g. '1Gi', '2000Mi' Override with string, e.g. '1Gi', '2000Mi' Override with the minimum number of GPUs, as an integer, for your plugin Override with the maximum number of GPUs, as an integer, for your plugin Use this dictionary structure to provide key-value output descriptive information that may be useful for the next downstream plugin. For example: { "finalOutputFile": "final/file.out", "viewer": "genericTextViewer", } The above dictionary is saved when plugin is called with a ``--saveoutputmeta`` flag. Note also that all file paths are relative to the system specified output directory. ENTRYPOINT
1,433
en
0.700385
from typing import Tuple, FrozenSet from collections import Iterable from mathsat import msat_term, msat_env from mathsat import msat_make_constant, msat_declare_function from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type from mathsat import msat_make_and, msat_make_not, msat_make_or from mathsat import msat_make_leq, msat_make_equal from mathsat import msat_make_number, msat_make_plus from pysmt.environment import Environment as PysmtEnv import pysmt.typing as types from ltl.ltl import TermMap, LTLEncoder from utils import name_next, symb_to_next from hint import Hint, Location def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term): geq = msat_make_geq(menv, arg0, arg1) return msat_make_not(menv, geq) def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term): return msat_make_leq(menv, arg1, arg0) def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term): leq = msat_make_leq(menv, arg0, arg1) return msat_make_not(menv, leq) def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term): n_arg0 = msat_make_not(menv, arg0) return msat_make_or(menv, n_arg0, arg1) def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term, msat_term, msat_term]: assert menv assert isinstance(menv, msat_env) assert enc assert isinstance(enc, LTLEncoder) bool_type = msat_get_bool_type(menv) real_type = msat_get_rational_type(menv) i = msat_declare_function(menv, "i", real_type) i = msat_make_constant(menv, i) r = msat_declare_function(menv, "r", real_type) r = msat_make_constant(menv, r) l = msat_declare_function(menv, "l", real_type) l = msat_make_constant(menv, l) inc_i = msat_declare_function(menv, "inc_i", bool_type) inc_i = msat_make_constant(menv, inc_i) x_i = msat_declare_function(menv, name_next("i"), real_type) x_i = msat_make_constant(menv, x_i) x_r = msat_declare_function(menv, name_next("r"), real_type) x_r = msat_make_constant(menv, x_r) x_l = msat_declare_function(menv, name_next("l"), real_type) x_l = msat_make_constant(menv, x_l) x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type) x_inc_i = msat_make_constant(menv, x_inc_i) curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i} zero = msat_make_number(menv, "0") one = msat_make_number(menv, "1") r_gt_0 = msat_make_gt(menv, r, zero) r_lt_l = msat_make_lt(menv, r, l) i_geq_0 = msat_make_geq(menv, i, zero) init = msat_make_and(menv, r_gt_0, r_lt_l) init = msat_make_and(menv, init, msat_make_and(menv, i_geq_0, msat_make_not(menv, inc_i))) init = msat_make_and(menv, init, msat_make_gt(menv, l, zero)) # r' = r trans = msat_make_equal(menv, x_r, r) # i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l i_lt_l = msat_make_lt(menv, i, l) x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i, msat_make_equal(menv, x_i, msat_make_plus(menv, i, one))) x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i), msat_make_equal(menv, x_i, i)) x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i) x_l_eq_l = msat_make_equal(menv, x_l, l) x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i, x_l_eq_l) trans = msat_make_and(menv, trans, msat_make_impl(menv, i_lt_l, x_i_eq_i_p_1_or_i_and_x_l_eq_l)) # i >= l -> i' = 0 & l' = l + 1 & !inc_i' i_geq_l = msat_make_geq(menv, i, l) x_i_eq_0 = msat_make_equal(menv, x_i, zero) x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one)) x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv, msat_make_and(menv, x_i_eq_0, x_l_eq_l_p_1), msat_make_not(menv, x_inc_i)) trans = 
msat_make_and(menv, trans, msat_make_impl(menv, i_geq_l, x_i_eq_0_and_x_l_eq_l_p_1)) # (G F inc_i) -> ! G F r > i G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i)) r_gt_i = msat_make_gt(menv, r, i) n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i))) ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i) return TermMap(curr2next), init, trans, ltl def hints(env: PysmtEnv) -> FrozenSet[Hint]: assert isinstance(env, PysmtEnv) mgr = env.formula_manager i = mgr.Symbol("i", types.REAL) r = mgr.Symbol("r", types.REAL) l = mgr.Symbol("l", types.REAL) inc_i = mgr.Symbol("inc_i", types.BOOL) symbs = frozenset([i, r, l, inc_i]) x_i = symb_to_next(mgr, i) x_r = symb_to_next(mgr, r) x_l = symb_to_next(mgr, l) x_inc_i = symb_to_next(mgr, inc_i) res = [] n0 = mgr.Real(0) n1 = mgr.Real(1) loc = Location(env, mgr.LE(r, n0)) loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1))) h_r = Hint("h_r1", env, frozenset([r]), symbs) h_r.set_locs([loc]) res.append(h_r) loc0 = Location(env, mgr.Not(inc_i)) loc0.set_progress(1, x_inc_i) loc1 = Location(env, inc_i, stutterT=x_inc_i) loc1.set_progress(0, mgr.Not(x_inc_i)) h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs) h_inc.set_locs([loc0, loc1]) res.append(h_inc) loc0 = Location(env, mgr.GE(i, n0)) loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1))) loc1 = Location(env, mgr.GE(i, n0)) loc1.set_progress(2, mgr.Equals(x_i, i)) loc2 = Location(env, mgr.GE(i, n0)) loc2.set_progress(0, mgr.Equals(x_i, i)) h_i = Hint("h_i4", env, frozenset([i]), symbs) h_i.set_locs([loc0, loc1, loc2]) res.append(h_i) loc = Location(env, mgr.GE(l, n0)) loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1))) h_l = Hint("h_l0", env, frozenset([l]), symbs) h_l.set_locs([loc]) res.append(h_l) loc = Location(env, mgr.Not(inc_i)) loc.set_progress(0, mgr.Not(x_inc_i)) h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs) h_inc.set_locs([loc]) res.append(h_inc) loc0 = Location(env, mgr.GE(r, n0)) loc0.set_progress(1, mgr.Equals(x_r, r)) loc1 = Location(env, mgr.GE(r, n0)) loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1))) loc2 = Location(env, mgr.GE(r, n0)) loc2.set_progress(0, mgr.Equals(x_r, r)) h_r = Hint("h_r4", env, frozenset([r]), symbs) h_r.set_locs([loc0, loc1, loc2]) res.append(h_r) loc0 = Location(env, mgr.GE(l, n0)) loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1))) loc1 = Location(env, mgr.GE(l, n0)) loc1.set_progress(0, mgr.Equals(x_l, l)) h_l = Hint("h_l2", env, frozenset([l]), symbs) h_l.set_locs([loc0, loc1]) res.append(h_l) loc0 = Location(env, mgr.Not(inc_i)) loc0.set_progress(1, x_inc_i) loc1 = Location(env, inc_i, stutterT=x_inc_i) loc1.set_progress(2, mgr.Not(x_inc_i)) loc2 = Location(env, mgr.Not(inc_i)) loc2.set_progress(0, mgr.Not(x_inc_i)) h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs) h_inc.set_locs([loc0, loc1, loc2]) res.append(h_inc) loc0 = Location(env, mgr.GE(r, n0)) loc0.set_progress(1, mgr.Equals(x_r, r)) loc1 = Location(env, mgr.GE(r, n0)) loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1))) h_r = Hint("h_r2", env, frozenset([r]), symbs) h_r.set_locs([loc0, loc1]) res.append(h_r) loc = Location(env, mgr.GE(r, n0)) loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1))) h_r = Hint("h_r0", env, frozenset([r]), symbs) h_r.set_locs([loc]) res.append(h_r) loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0), stutterT=mgr.Equals(x_i, mgr.Plus(i, l))) loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1))) loc1 = Location(env, mgr.GE(i, n0)) loc1.set_progress(0, mgr.Equals(x_i, i)) h_i = Hint("h_i3", env, frozenset([i]), symbs) 
h_i.set_locs([loc0, loc1]) res.append(h_i) stutter = mgr.Equals(x_i, i) loc = Location(env, mgr.GE(i, n0), stutterT=stutter) loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1))) h_i = Hint("h_i0", env, frozenset([i]), symbs) h_i.set_locs([loc]) res.append(h_i) return frozenset(res)
benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/12-extending_bound_39.py
8,625
r' = r i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l i >= l -> i' = 0 & l' = l + 1 & !inc_i' (G F inc_i) -> ! G F r > i
136
en
0.457986
# MIT License # Copyright (c) 2018 the NJUNMT-pytorch authors. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import random import time from copy import deepcopy import numpy as np import torch import yaml from tensorboardX import SummaryWriter from tqdm import tqdm from src.data.data_iterator import DataIterator from src.data.dataset import TextLineDataset, ZipDataset from src.data.vocabulary import Vocabulary from src.decoding import beam_search, ensemble_beam_search from src.decoding.beam_search import nmt_lm_fusion_beam_search from src.metric.bleu_scorer import SacreBLEUScorer from src.models import build_model from src.modules.criterions import NMTCriterion from src.optim import Optimizer from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler from src.utils.common_utils import * from src.utils.configs import default_configs, pretty_configs from src.utils.logging import * from src.utils.moving_average import MovingAverage BOS = Vocabulary.BOS EOS = Vocabulary.EOS PAD = Vocabulary.PAD def set_seed(seed): torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) random.seed(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True def load_model_parameters(path, map_location="cpu"): state_dict = torch.load(path, map_location=map_location) if "model" in state_dict: return state_dict["model"] return state_dict def split_shard(*inputs, split_size=1): if split_size <= 1: yield inputs else: lengths = [len(s) for s in inputs[-1]] # sorted_indices = np.argsort(lengths) # sorting inputs inputs = [ [inp[ii] for ii in sorted_indices] for inp in inputs ] # split shards total_batch = sorted_indices.shape[0] # total number of batches if split_size >= total_batch: yield inputs else: shard_size = total_batch // split_size _indices = list(range(total_batch))[::shard_size] + [total_batch] for beg, end in zip(_indices[:-1], _indices[1:]): yield (inp[beg:end] for inp in inputs) def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True): """ Args: eval ('bool'): indicator for eval/infer. 
Returns: """ def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True): batch_size = len(samples) sizes = [len(s) for s in samples] max_size = max(sizes) x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64') for ii in range(batch_size): x_np[ii, :sizes[ii]] = samples[ii] if batch_first is False: x_np = np.transpose(x_np, [1, 0]) x = torch.tensor(x_np) if cuda is True: x = x.cuda() return x seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x)) x = _np_pad_batch_2D(samples=seqs_x, pad=PAD, cuda=cuda, batch_first=batch_first) if seqs_y is None: return x seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y)) y = _np_pad_batch_2D(seqs_y, pad=PAD, cuda=cuda, batch_first=batch_first) return x, y def compute_forward(model, critic, seqs_x, eval=False, normalization=1.0, norm_by_words=False ): """ :type model: nn.Module :type critic: NMTCriterion """ x_inp = seqs_x[:, :-1].contiguous() x_label = seqs_x[:, 1:].contiguous() words_norm = x_label.ne(PAD).float().sum(1) if not eval: model.train() critic.train() # For training with torch.enable_grad(): log_probs = model(x_inp) loss = critic(inputs=log_probs, labels=x_label, reduce=False, normalization=normalization) if norm_by_words: loss = loss.div(words_norm).sum() else: loss = loss.sum() torch.autograd.backward(loss) return loss.item() else: model.eval() critic.eval() # For compute loss with torch.no_grad(): log_probs = model(x_inp) loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True) return loss.item() def loss_validation(model, critic, valid_iterator): """ :type model: Transformer :type critic: NMTCriterion :type valid_iterator: DataIterator """ n_sents = 0 n_tokens = 0.0 sum_loss = 0.0 valid_iter = valid_iterator.build_generator() for batch in valid_iter: _, seqs_x = batch n_sents += len(seqs_x) n_tokens += sum(len(s) for s in seqs_x) x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU) loss = compute_forward(model=model, critic=critic, seqs_x=x, eval=True) if np.isnan(loss): WARN("NaN detected!") sum_loss += float(loss) return float(sum_loss / n_sents) def bleu_validation(uidx, valid_iterator, model, bleu_scorer, vocab_tgt, batch_size, valid_dir="./valid", max_steps=10, beam_size=5, alpha=-1.0 ): model.eval() numbers = [] trans = [] infer_progress_bar = tqdm(total=len(valid_iterator), desc=' - (Infer) ', unit="sents") valid_iter = valid_iterator.build_generator(batch_size=batch_size) for batch in valid_iter: seq_nums = batch[0] numbers += seq_nums seqs_x = batch[1] infer_progress_bar.update(len(seqs_x)) x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU) with torch.no_grad(): word_ids = beam_search(nmt_model=model, beam_size=beam_size, max_steps=max_steps, src_seqs=x, alpha=alpha) word_ids = word_ids.cpu().numpy().tolist() # Append result for sent_t in word_ids: sent_t = [[wid for wid in line if wid != PAD] for line in sent_t] x_tokens = [] for wid in sent_t[0]: if wid == EOS: break x_tokens.append(vocab_tgt.id2token(wid)) if len(x_tokens) > 0: trans.append(vocab_tgt.tokenizer.detokenize(x_tokens)) else: trans.append('%s' % vocab_tgt.id2token(EOS)) origin_order = np.argsort(numbers).tolist() trans = [trans[ii] for ii in origin_order] infer_progress_bar.close() if not os.path.exists(valid_dir): os.mkdir(valid_dir) hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx)) with open(hyp_path, 'w') as f: for line in trans: f.write('%s\n' % line) with open(hyp_path) as f: bleu_v = bleu_scorer.corpus_bleu(f) return bleu_v def load_pretrained_model(nmt_model, pretrain_path, device, 
exclude_prefix=None): """ Args: nmt_model: model. pretrain_path ('str'): path to pretrained model. map_dict ('dict'): mapping specific parameter names to those names in current model. exclude_prefix ('dict'): excluding parameters with specific names for pretraining. Raises: ValueError: Size not match, parameter name not match or others. """ if exclude_prefix is None: exclude_prefix = [] if pretrain_path != "": INFO("Loading pretrained model from {}".format(pretrain_path)) pretrain_params = torch.load(pretrain_path, map_location=device) for name, params in pretrain_params.items(): flag = False for pp in exclude_prefix: if name.startswith(pp): flag = True break if flag: continue INFO("Loading param: {}...".format(name)) try: nmt_model.load_state_dict({name: params}, strict=False) except Exception as e: WARN("{}: {}".format(str(Exception), e)) INFO("Pretrained model loaded.") def train(FLAGS): """ FLAGS: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # write log of training to file. write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S"))) GlobalNames.USE_GPU = FLAGS.use_gpu if GlobalNames.USE_GPU: CURRENT_DEVICE = "cpu" else: CURRENT_DEVICE = "cuda:0" config_path = os.path.abspath(FLAGS.config_path) with open(config_path.strip()) as f: configs = yaml.load(f) INFO(pretty_configs(configs)) # Add default configs configs = default_configs(configs) data_configs = configs['data_configs'] model_configs = configs['model_configs'] optimizer_configs = configs['optimizer_configs'] training_configs = configs['training_configs'] GlobalNames.SEED = training_configs['seed'] set_seed(GlobalNames.SEED) best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary(**data_configs["vocabularies"][0]) train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"]) train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"]) train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], ), shuffle=training_configs['shuffle'] ) valid_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['valid_data'][0], vocabulary=vocab_src, ), ) training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=training_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=training_configs['batching_key']) valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True) INFO('Done. Elapsed time {0}'.format(timer.toc())) lrate = optimizer_configs['learning_rate'] is_early_stop = False # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. 
Initial model_collections = Collections() checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_checkpoints'] ) best_model_saver = BestKSaver(save_prefix="{0}.best".format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs["num_kept_best_checkpoints"]) # 1. Build Model & Criterion INFO('Building model...') timer.tic() nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs) INFO(nmt_model) params_total = sum([p.numel() for n, p in nmt_model.named_parameters()]) params_with_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1]) INFO('Total parameters: {}'.format(params_total)) INFO('Total parameters (excluding word embeddings): {}'.format(params_with_embedding)) critic = NMTCriterion(label_smoothing=model_configs['label_smoothing']) INFO(critic) INFO('Done. Elapsed time {0}'.format(timer.toc())) # 2. Move to GPU if GlobalNames.USE_GPU: nmt_model = nmt_model.cuda() critic = critic.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE) # 4. Build optimizer INFO('Building Optimizer...') optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'] ) # 5. Build scheduler for optimizer if needed if optimizer_configs['schedule_method'] is not None: if optimizer_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler(optimizer=optim, **optimizer_configs["scheduler_configs"] ) elif optimizer_configs['schedule_method'] == "noam": scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs']) elif optimizer_configs["schedule_method"] == "rsqrt": scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs']) else: WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method'])) scheduler = None else: scheduler = None # 6. build moving average if training_configs['moving_average_method'] is not None: ma = MovingAverage(moving_average_method=training_configs['moving_average_method'], named_params=nmt_model.named_parameters(), alpha=training_configs['moving_average_alpha']) else: ma = None INFO('Done. 
Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if FLAGS.reload: checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [0])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] summary_writer = SummaryWriter(log_dir=FLAGS.log_path) cum_samples = 0 cum_words = 0 valid_loss = best_valid_loss = float('inf') # Max Float saving_files = [] # Timer for computing speed timer_for_speed = Timer() timer_for_speed.tic() INFO('Begin training...') while True: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx), total=len(training_iterator), unit="sents" ) for batch in training_iter: uidx += 1 if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss": scheduler.step(global_step=uidx) seqs_x = batch n_samples_t = len(seqs_x) n_words_t = sum(len(s) for s in seqs_x) cum_samples += n_samples_t cum_words += n_words_t train_loss = 0. optim.zero_grad() try: # Prepare data for seqs_x_t, in split_shard(seqs_x, split_size=training_configs['update_cycle']): x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU) loss = compute_forward(model=nmt_model, critic=critic, seqs_x=x, eval=False, normalization=n_samples_t, norm_by_words=training_configs["norm_by_words"]) train_loss += loss / x.size(1) optim.step() except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') oom_count += 1 optim.zero_grad() else: raise e if ma is not None and eidx >= training_configs['moving_average_start_epoch']: ma.step() training_progress_bar.update(n_samples_t) training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx)) training_progress_bar.set_postfix_str( 'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss)) summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx) # ================================================================================== # # Display some information if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']): # words per second and sents per second words_per_sec = cum_words / (timer.toc(return_seconds=True)) sents_per_sec = cum_samples / (timer.toc(return_seconds=True)) lrate = list(optim.get_lrate())[0] summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx) summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) # Reset timer timer.tic() cum_words = 0 cum_samples = 0 # ================================================================================== # # Loss Validation & Learning rate annealing if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug): if ma is not None: origin_state_dict = deepcopy(nmt_model.state_dict()) nmt_model.load_state_dict(ma.export_ma_params(), 
strict=False) valid_loss = loss_validation(model=nmt_model, critic=critic, valid_iterator=valid_iterator, ) model_collections.add_to_collection("history_losses", valid_loss) min_history_loss = np.array(model_collections.get_collection("history_losses")).min() summary_writer.add_scalar("loss", valid_loss, global_step=uidx) summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx) best_valid_loss = min_history_loss if ma is not None: nmt_model.load_state_dict(origin_state_dict) del origin_state_dict if optimizer_configs["schedule_method"] == "loss": scheduler.step(global_step=uidx, metric=best_valid_loss) # If model get new best valid bleu score if valid_loss < best_valid_loss: bad_count = 0 if is_early_stop is False: # 1. save the best model's parameters torch.save(nmt_model.state_dict(), best_model_prefix + ".final") # 2. save the best checkpoint model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) best_model_saver.save(global_step=uidx, metric=valid_loss, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) else: bad_count += 1 # At least one epoch should be traversed if bad_count >= training_configs['early_stop_patience'] and eidx > 0: is_early_stop = True WARN("Early Stop!") summary_writer.add_scalar("bad_count", bad_count, uidx) INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format( uidx, valid_loss, lrate, bad_count )) # ================================================================================== # # Saving checkpoints if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) if not is_early_stop: checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break def nmt_lm_fusion_translate(FLAGS): GlobalNames.USE_GPU = FLAGS.use_gpu config_path = os.path.abspath(FLAGS.config_path) with open(config_path.strip()) as f: configs = yaml.load(f) data_configs = configs['data_configs'] nmt_model_configs = configs['nmt_model_configs'] lm_model_configs = configs['lm_model_configs'] timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary(**data_configs["vocabularies"][0]) vocab_tgt = Vocabulary(**data_configs["vocabularies"][1]) valid_dataset = TextLineDataset(data_path=FLAGS.source_path, vocabulary=vocab_src) valid_iterator = DataIterator(dataset=valid_dataset, batch_size=FLAGS.batch_size, use_bucket=True, buffer_size=100000, numbering=True) INFO('Done. Elapsed time {0}'.format(timer.toc())) # ================================================================================== # # Build Model & Sampler & Validation INFO('Building model...') timer.tic() nmt_model_path = FLAGS.nmt_model_path lm_model_path = FLAGS.lm_model_path nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, **nmt_model_configs) lm_model = build_model(n_words=vocab_tgt.max_n_words, **lm_model_configs) nmt_model.eval() lm_model.eval() INFO('Done. 
Elapsed time {0}'.format(timer.toc())) INFO('Reloading model parameters...') timer.tic() nmt_params = load_model_parameters(nmt_model_path, map_location="cpu") lm_params = load_model_parameters(lm_model_path, map_location="cpu") nmt_model.load_state_dict(nmt_params) lm_model.load_state_dict(lm_params) if GlobalNames.USE_GPU: nmt_model.cuda() lm_model.cuda() INFO('Done. Elapsed time {0}'.format(timer.toc())) INFO('Begin...') result_numbers = [] result = [] n_words = 0 timer.tic() infer_progress_bar = tqdm(total=len(valid_iterator), desc=' - (Infer) ', unit="sents") valid_iter = valid_iterator.build_generator() for batch in valid_iter: numbers, seqs_x = batch batch_size_t = len(seqs_x) x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU) with torch.no_grad(): word_ids = nmt_lm_fusion_beam_search(nmt_model=nmt_model, lm_model=lm_model, beam_size=FLAGS.beam_size, max_steps=FLAGS.max_steps, src_seqs=x, alpha=FLAGS.alpha, beta=FLAGS.beta) word_ids = word_ids.cpu().numpy().tolist() result_numbers += numbers # Append result for sent_t in word_ids: sent_t = [[wid for wid in line if wid != PAD] for line in sent_t] result.append(sent_t) n_words += len(sent_t[0]) infer_progress_bar.update(batch_size_t) infer_progress_bar.close() INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True)))) translation = [] for sent in result: samples = [] for trans in sent: sample = [] for w in trans: if w == vocab_tgt.EOS: break sample.append(vocab_tgt.id2token(w)) samples.append(vocab_tgt.tokenizer.detokenize(sample)) translation.append(samples) # resume the ordering origin_order = np.argsort(result_numbers).tolist() translation = [translation[ii] for ii in origin_order] with open(FLAGS.saveto, 'w') as f: for trans in translation: f.write("%s\n"%trans[0]) if __name__ == '__main__': _args = { "model_name": "test_rnnlm", "reload": False, "config_path": "./configs/test_rnnlm.yaml", "debug": True, "use_gpu": False, "task": "lm", "log_path": "/tmp", "saveto": "/tmp", "valid_path": "/tmp", } from src.bin import train as _train _train.run(**_args)
src/tasks/lm.py
28,718
:type model: nn.Module :type critic: NMTCriterion Args: nmt_model: model. pretrain_path ('str'): path to pretrained model. map_dict ('dict'): mapping specific parameter names to those names in current model. exclude_prefix ('dict'): excluding parameters with specific names for pretraining. Raises: ValueError: Size not match, parameter name not match or others. :type model: Transformer :type critic: NMTCriterion :type valid_iterator: DataIterator Args: eval ('bool'): indicator for eval/infer. Returns: FLAGS: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str MIT License Copyright (c) 2018 the NJUNMT-pytorch authors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. sorting inputs split shards total number of batches For training For compute loss Append result write log of training to file. Add default configs ================================================================================== Load Data Generate target dictionary ================================ Begin ======================================== Build Model & Optimizer We would do steps below on after another 1. build models & criterion 2. move models & criterion to gpu if needed 3. load pre-trained model if needed 4. build optimizer 5. build learning rate scheduler if needed 6. load checkpoints if needed 0. Initial 1. Build Model & Criterion 2. Move to GPU 3. Load pretrained model if needed 4. Build optimizer 5. Build scheduler for optimizer if needed 6. build moving average Reload from latest checkpoint ================================================================================== Prepare training Max Float Timer for computing speed Build iterator and progress bar Prepare data ================================================================================== Display some information words per second and sents per second Reset timer ================================================================================== Loss Validation & Learning rate annealing If model get new best valid bleu score 1. save the best model's parameters 2. save the best checkpoint At least one epoch should be traversed ================================================================================== Saving checkpoints ================================================================================== Load Data Generate target dictionary ================================================================================== Build Model & Sampler & Validation Append result resume the ordering
3,600
en
0.64091
"""Class definition of the ZoneSpeaker.""" import bisect import functools from typing import Any, Callable, List, Tuple import simulation_groundtruth.srv as groundtruth_srv from simulation_evaluation.msg import Speaker as SpeakerMsg from simulation_groundtruth.msg import LabeledPolygon as LabeledPolygonMsg from simulation_groundtruth.msg import Lane as LaneMsg from simulation_groundtruth.msg import Section as SectionMsg import simulation.utils.road.sections.type as road_section_type from simulation.utils.geometry import Point from simulation.utils.road.sections import SurfaceMarking from .speaker import Speaker class ZoneSpeaker(Speaker): """Information about the zone of the road the car is in.""" def __init__( self, *, section_proxy: Callable[[], List[SectionMsg]], lane_proxy: Callable[[int], LaneMsg], obstacle_proxy: Callable[[int], List[LabeledPolygonMsg]], surface_marking_proxy: Callable[[int], List[LabeledPolygonMsg]], parking_proxy: Callable[[int], Any], intersection_proxy: Callable[[int], Any], overtaking_buffer: float = 2, start_zone_buffer: float = 1, end_zone_buffer: float = 1.5, yield_distance: Tuple[float, float] = (-0.6, -0.2), ): """Initialize zone speaker. Args: section_proxy: Returns all sections when called. lane_proxy: Returns a LaneMsg for each section. obstacle_proxy: function which returns obstacles in a section. surface_marking_proxy: function which returns surface_markings in a section. parking_proxy: function which returns parking msg in a section. intersection_proxy: function which returns intersection msg in a section. parking_spot_buffer: buffer around parking spots in which a parking attempt is also accepted overtaking_buffer: buffer around obstacles that the car is allowed to overtake start_zone_buffer: beginning of the road that is considered as a start zone end_zone_buffer: end of the road that is considered as the end yield_distance: interval before intersections that the vehicle must yield in """ super().__init__( section_proxy=section_proxy, lane_proxy=lane_proxy, obstacle_proxy=obstacle_proxy, surface_marking_proxy=surface_marking_proxy, intersection_proxy=intersection_proxy, ) self.get_parking_msgs = parking_proxy self.overtaking_buffer = overtaking_buffer self.start_zone_buffer = start_zone_buffer self.end_zone_buffer = end_zone_buffer self.yield_distance = yield_distance # Get total length. self.total_length = self.middle_line.length @functools.cached_property def overtaking_zones(self) -> List[Tuple[float, float]]: """Intervals in which the car is allowed to overtake along the :py:attr:`Speaker.middle_line`.""" # Get all obstacle polygons obstacles = list( lp.frame for sec in self.sections if sec.type != road_section_type.PARKING_AREA for lp in self.get_obstacles_in_section(sec.id) ) # Get blocked area polygons because the car # is allowed to drive onto the left lane there! 
surface_markings = list( surface_marking for sec in self.sections for surface_marking in self.get_surface_markings_in_section(sec.id) ) blocked_areas = [ sm.frame for sm in surface_markings if sm.id_ == SurfaceMarking.BLOCKED_AREA[0] ] # Intervals where polygons are along the middle line intervals = list( self.get_interval_for_polygon(obs) for obs in (obstacles + blocked_areas) ) if len(intervals) == 0: return [] zone_intervals = [ ( intervals[0][0] - self.overtaking_buffer, intervals[0][1] + self.overtaking_buffer, ) ] for start, end in intervals[1:]: last = zone_intervals[-1] # If the start of this section and end of the last overtaking zone # overlap the last interval is extended if start - self.overtaking_buffer < last[1]: zone_intervals[-1] = (last[0], end + self.overtaking_buffer) # Else a new interval is added else: zone_intervals.append( (start - self.overtaking_buffer, end + self.overtaking_buffer) ) # import rospy # rospy.loginfo(f"Obstacle zones: {zone_intervals}") return zone_intervals def _intersection_yield_zones(self, rule: int) -> List[Tuple[float, float]]: """Intervals in which the car is supposed to halt/stop (in front of intersections). Args: rule: only intersections with this rule are considered """ intervals = [] for sec in self.sections: if sec.type != road_section_type.INTERSECTION: continue # Get arclength of the last point of the middle line # at the intersection south opening intersection_msg = self.get_intersection(sec.id) arc_length = self.middle_line.project( Point(intersection_msg.south.middle_line[-1]) ) if intersection_msg.rule == rule: intervals.append( ( arc_length + self.yield_distance[0], arc_length + self.yield_distance[1], ) ) return intervals @functools.cached_property def stop_zones(self) -> List[Tuple[float, float]]: """Intervals in which the car is supposed to stop (in front of intersections).""" return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.STOP) @functools.cached_property def halt_zones(self) -> List[Tuple[float, float]]: """Intervals in which the car is supposed to halt (in front of intersections).""" return self._intersection_yield_zones(groundtruth_srv.IntersectionSrvResponse.YIELD) @functools.cached_property def speed_zones(self) -> List[Tuple[float, int]]: surface_markings = [ self.get_surface_markings_in_section(sec.id) for sec in self.sections ] surface_markings = [marking for sublist in surface_markings for marking in sublist] result = [] result.append((0, SpeakerMsg.SPEED_UNLIMITED_ZONE)) for marking in surface_markings: if ( SurfaceMarking.ZONE_10_START[0] <= marking.id_ <= SurfaceMarking.ZONE_90_START[0] ): limit = 10 * (marking.id_ - SurfaceMarking.ZONE_10_START[0] + 1) result.append( ( self.get_interval_for_polygon(marking.frame)[0], getattr(SpeakerMsg, f"SPEED_{limit}_ZONE"), ) ) if ( SurfaceMarking.ZONE_10_END[0] <= marking.id_ <= SurfaceMarking.ZONE_90_END[0] ): result.append( ( self.get_interval_for_polygon(marking.frame)[0], SpeakerMsg.SPEED_UNLIMITED_ZONE, ) ) return sorted(result, key=lambda tup: tup[0]) def _inside_any_interval(self, intervals: List[Tuple[float, float]]) -> bool: """Determine if the car is currently in any of the given intervals.""" beginnings = list(interval[0] for interval in intervals) endings = list(interval[1] for interval in intervals) b_idx = bisect.bisect_left(beginnings, self.arc_length) - 1 e_idx = bisect.bisect_left(endings, self.arc_length) - 1 # If the vehicle is in interval x then the beginning is before x # and ending is behind x return b_idx - e_idx == 1 def 
speak(self) -> List[SpeakerMsg]: """List of speaker msgs. Contents: * beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE, end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE, and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE, * close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE * before yield/stop lines \ -> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE, * parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE """ msgs = super().speak() def append_msg(t: int): msg = SpeakerMsg() msg.type = t msgs.append(msg) # Determine if car is in parking zone append_msg( SpeakerMsg.PARKING_ZONE if self.current_section.type == road_section_type.PARKING_AREA else SpeakerMsg.NO_PARKING_ZONE ) # Overtaking append_msg( SpeakerMsg.OVERTAKING_ZONE if self._inside_any_interval(self.overtaking_zones) else SpeakerMsg.NO_OVERTAKING_ZONE ) # Start/End zone if self.arc_length < self.start_zone_buffer: append_msg(SpeakerMsg.START_ZONE) elif self.arc_length + self.end_zone_buffer < self.total_length: append_msg(SpeakerMsg.DRIVING_ZONE) else: append_msg(SpeakerMsg.END_ZONE) # Stop / halt zone if self._inside_any_interval(self.halt_zones): append_msg(SpeakerMsg.HALT_ZONE) elif self._inside_any_interval(self.stop_zones): append_msg(SpeakerMsg.STOP_ZONE) else: append_msg(SpeakerMsg.NO_STOP_ZONE) # Speed zone for x, msg in reversed(self.speed_zones): if x + 0.5 < self.arc_length: # 50cm Threshold append_msg(msg) break return msgs
simulation/src/simulation_evaluation/src/speaker/speakers/zone.py
10,226
Information about the zone of the road the car is in. Initialize zone speaker. Args: section_proxy: Returns all sections when called. lane_proxy: Returns a LaneMsg for each section. obstacle_proxy: function which returns obstacles in a section. surface_marking_proxy: function which returns surface_markings in a section. parking_proxy: function which returns parking msg in a section. intersection_proxy: function which returns intersection msg in a section. parking_spot_buffer: buffer around parking spots in which a parking attempt is also accepted overtaking_buffer: buffer around obstacles that the car is allowed to overtake start_zone_buffer: beginning of the road that is considered as a start zone end_zone_buffer: end of the road that is considered as the end yield_distance: interval before intersections that the vehicle must yield in Determine if the car is currently in any of the given intervals. Intervals in which the car is supposed to halt/stop (in front of intersections). Args: rule: only intersections with this rule are considered Intervals in which the car is supposed to halt (in front of intersections). Intervals in which the car is allowed to overtake along the :py:attr:`Speaker.middle_line`. List of speaker msgs. Contents: * beginning of road -> :ref:`Speaker <speaker_msg>`.START_ZONE, end of road -> :ref:`Speaker <speaker_msg>`.END_ZONE, and in between -> :ref:`Speaker <speaker_msg>`.DRIVING_ZONE, * close to an obstacle -> :ref:`Speaker <speaker_msg>`.OVERTAKING_ZONE * before yield/stop lines -> :ref:`Speaker <speaker_msg>`.HALT_ZONE/SpeakerMsg.STOP_ZONE, * parking area -> :ref:`Speaker <speaker_msg>`.PARKING_ZONE Intervals in which the car is supposed to stop (in front of intersections). Class definition of the ZoneSpeaker. Get total length. Get all obstacle polygons Get blocked area polygons because the car is allowed to drive onto the left lane there! Intervals where polygons are along the middle line If the start of this section and end of the last overtaking zone overlap the last interval is extended Else a new interval is added import rospy rospy.loginfo(f"Obstacle zones: {zone_intervals}") Get arclength of the last point of the middle line at the intersection south opening If the vehicle is in interval x then the beginning is before x and ending is behind x Determine if car is in parking zone Overtaking Start/End zone Stop / halt zone Speed zone 50cm Threshold
2,531
en
0.908315
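Every zone decision in the speak() method above reduces to an interval-membership test against the car's current arc length. A minimal, self-contained sketch of that test, assuming zones are plain (start, end) arc-length tuples; the real _inside_any_interval lives in the truncated part of the class and may differ:

from typing import Iterable, Tuple

def inside_any_interval(arc_length: float, intervals: Iterable[Tuple[float, float]]) -> bool:
    """Return True if arc_length lies inside any closed (start, end) interval."""
    return any(start <= arc_length <= end for start, end in intervals)

# e.g. halt_zones might be [(12.0, 14.5), (40.0, 42.5)] in meters of arc length
assert inside_any_interval(13.0, [(12.0, 14.5), (40.0, 42.5)])
assert not inside_any_interval(20.0, [(12.0, 14.5), (40.0, 42.5)])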
import argparse
import xml.etree.ElementTree as etree  # cElementTree was removed in Python 3.9; ElementTree is the supported module
import os
from os import listdir
from os.path import isfile, join
import random

def processMedlineFolder(medlineFolder,outFolder):
    """Basic function that iterates through the abstracts in a Medline folder, does a basic word count, and saves the results to a file

    Args:
        medlineFolder (folder): Medline XML folder containing abstracts
        outFolder (folder): Folder to save output data to

    Returns:
        Nothing
    """
    abstractCount = 0

    # List of all files in the directory
    files = [ f for f in listdir(medlineFolder) if isfile(join(medlineFolder, f)) ]
    # Filter for only XML files
    files = sorted([ f for f in files if f.endswith('xml') ])

    outfile = join(outFolder,"countWordsError.txt")
    with open(outfile, "a") as result:
        # Iterate over all files
        for f in files:
            print("Processing %s" % f)
            fullpath = join(medlineFolder,f)

            # Iterate through the XML file and stop on each MedlineCitation
            for event, elem in etree.iterparse(fullpath, events=('start', 'end', 'start-ns', 'end-ns')):
                if (event=='end' and elem.tag=='MedlineCitation'):
                    # Let's get the PMID and Abstract elements from the XML
                    pmidElements = elem.findall('./PMID')
                    abstractElements = elem.findall('./Article/Abstract/AbstractText')

                    if len(pmidElements) != 1 or len(abstractElements) != 1:
                        continue

                    # Pull the values of the PMID and abstract elements
                    pmid = pmidElements[0].text
                    abstract = abstractElements[0].text

                    if abstract is not None:
                        # Do a very basic word count
                        wordCount = len(abstract.split())

                        # Prepare and save output to file
                        line = "%s\t%d\n" % (pmid,wordCount)
                        result.write(line)

                        abstractCount += 1

                    elem.clear()  # free the processed subtree so large files don't exhaust memory

    print("%d abstracts processed" % abstractCount)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Little toy example to "process" a Medline abstract file and give naive word counts for each abstract')
    parser.add_argument('-i',required=True,help='Medline folder to process')
    parser.add_argument('-o',required=True,help='Output folder for word-counts')
    args = parser.parse_args()

    # Sometimes throw an error
    if random.random() > 0.5:
        raise RuntimeError("This sometimes throws an error")

    processMedlineFolder(args.i,args.o)
server/tools/CountWordsError/0.1/CountWordsError.py
2,283
Basic function that iterates through abstracts in a medline file, do a basic word count and save to a file Args: medlineFolder (folder): Medline XML folder containing abstracts outFolder (folder): Folder to save output data to Returns: Nothing List of all files in the directory Filter for only XML files Iterate over all files Iterate through the XML file and stop on each MedlineCitation Let's get the PMID and Abstract elements from the XML Pull the values of the PMID and abstract elements Do a very basic word count Prepare and save output to file Sometimes throw an error
604
en
0.808287
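The iterparse pattern above is the standard way to stream large Medline XML without loading the whole document; a minimal, self-contained sketch using a toy in-memory document (the element names match the file above, everything else is illustrative):

import io
import xml.etree.ElementTree as etree

xml_doc = b"""<Set>
  <MedlineCitation><PMID>1</PMID>
    <Article><Abstract><AbstractText>two words</AbstractText></Abstract></Article>
  </MedlineCitation>
</Set>"""

for event, elem in etree.iterparse(io.BytesIO(xml_doc), events=('end',)):
    if elem.tag == 'MedlineCitation':
        pmid = elem.findtext('./PMID')
        abstract = elem.findtext('./Article/Abstract/AbstractText')
        print(pmid, len(abstract.split()))  # -> "1 2"
        elem.clear()  # keeps memory flat on multi-gigabyte inputs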
"""
SMock -- Serverboards Mock library -- Mock comfortably.

This library helps to mock function and method calls, getting the data
from an external yaml file.
"""
import json

import yaml


class MockWrapper:
    """
    Wraps all the data returned by the mocked function to behave like a
    dictionary, like an object, like a function, like a jsonable dict...
    like almost everything you may need
    """

    def __init__(self, data):
        self.__data = data

    def __getattr__(self, key):
        if key not in self.__data:
            raise KeyError("'%s' not found in %s" % (key, self.__data.keys()))
        return self.__getitem__(key)

    def __call__(self):
        return wrapped(self.__data)

    def __getitem__(self, key):
        val = self.__data[key]
        if isinstance(val, (int, str)):
            return val
        return wrapped(val)

    def __str__(self):
        return str(self.__data)

    def __repr__(self):
        return repr(self.__data)

    def __eq__(self, other):
        return self.__data.__eq__(other)

    def __le__(self, other):
        return self.__data.__le__(other)

    def __ge__(self, other):
        return self.__data.__ge__(other)

    def __lt__(self, other):
        return self.__data.__lt__(other)

    def __gt__(self, other):
        return self.__data.__gt__(other)

    def __len__(self):
        return self.__data.__len__()

    def keys(self):
        return self.__data.keys()

    def get(self, key, defv=None):
        return self.__data.get(key, defv)


class MockWrapperList(MockWrapper, list):
    def __init__(self, data):
        MockWrapper.__init__(self, data)
        list.__init__(self, data)


class MockWrapperDict(MockWrapper, dict):
    def __init__(self, data):
        MockWrapper.__init__(self, data)
        dict.__init__(self, data)


def wrapped(data):
    if isinstance(data, dict):
        return MockWrapperDict(data)
    if isinstance(data, list):
        return MockWrapperList(data)
    return MockWrapper(data)


def mock_match(A, B):
    """
    Checks that the params of a mocked function call are as expected.

    It is necessary because sometimes we get a tuple while the mock data
    has lists.

    Examples:

    ```
    >>> mock_match("A", "A")
    True
    >>> mock_match("A", "B")
    False
    >>> mock_match(["A", "B", "C"], ["A", "B", "C"])
    True
    >>> mock_match(["A", "B", "C"], "*")
    True

    ```
    """
    if B == '*':  # always match
        return True
    if isinstance(A, (tuple, list)):
        if not isinstance(B, (tuple, list)) or len(A) != len(B):
            return False  # different shapes can never match element-wise
        return all(mock_match(a, b) for (a, b) in zip(A, B))
    return A == B


def mock_res(name, data, args=[], kwargs={}):
    """
    Given a name, data and call parameters, returns the mocked response

    If there is no matching response, raises an exception that can be used
    to prepare the mock data.

    This can be used for situations where you mock some function like data;
    for example at [Serverboards](https://serverboards.io), we use it to
    mock RPC calls.

    It's also used internally by every other mocking helper.
    """
    data = data.get(name)
    if not data:
        raise Exception(
            "unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
                name, json.dumps(args), json.dumps(kwargs)
            )
        )
    for res in data:
        if (mock_match(args, res.get("args"))
                and mock_match(kwargs, res.get("kwargs", {}))):
            if 'error' in res:
                raise Exception(res["error"])
            response = res["response"]
            if isinstance(response, (int, str)):
                return response
            return wrapped(response)
    raise Exception(
        "unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
            name, json.dumps(args), json.dumps(kwargs)
        )
    )


def mock_method(name, data):
    """
    Returns a function that mocks an original function.
    """
    def mockf(*args, **kwargs):
        return mock_res(name, data, args, kwargs)
    return mockf


def mock_method_async(name, data):
    """
    Returns an async function that mocks an original async function
    """
    async def mockf(*args, **kwargs):
        return mock_res(name, data, args, kwargs)
    return mockf


class SMock:
    """
    Encapsulates mocking calls so it's easier to load data and mock methods

    Example:

    ```python
    >>> import requests
    >>> smocked = SMock("tests/data.yaml")
    >>> requests.get = smocked.mock_method("requests.get")
    >>> res = requests.get("https://mocked.url")
    >>> res.status_code
    200
    >>> res.content
    'Gocha!'
    >>> res.json()
    {'text': 'Gocha too!'}

    ```

    The mock file is a yaml file with each mocked function as keys, and
    `args`/`kwargs` as calling args and kwargs, and `response` the response.

    Check `tests/data.yaml` for an example in the source code.
    """

    def __init__(self, mockfile):
        with open(mockfile) as fd:
            # safe_load avoids executing arbitrary YAML tags; recent PyYAML
            # versions deprecated calling bare yaml.load without a Loader
            self._data = yaml.safe_load(fd)

    def mock_res(self, name, args=[], kwargs={}):
        """
        Calls `mock_res`

        Mock by args:

        ```
        >>> smock = SMock("tests/data.yaml")
        >>> res = smock.mock_res("requests.get", ["https://mocked.url"])
        >>> res.status_code
        200

        ```

        Using "*" as args, as fallback. As there is no kwargs, use default:

        ```
        >>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])
        >>> res.status_code
        404

        ```

        Using "*" as kwargs:

        ```
        >>> res = smock.mock_res("requests.get",
        ...                      ["https://mocked.url"],
        ...                      {'data': 'data'})
        >>> res.status_code
        200
        >>> res.content
        'Mocked query'

        ```
        """
        return mock_res(name, self._data, args, kwargs)

    def mock_method(self, name):
        """
        Calls `mock_method`
        """
        return mock_method(name, self._data)

    def mock_method_async(self, name):
        """
        Calls `mock_method_async`.

        Note: this returns the async mock function itself, so it is a plain
        method; awaiting mock_method_async(name, data) would fail because
        that helper returns a function, not a coroutine.
        """
        return mock_method_async(name, self._data)
smock.py
6,196
Wraps all the data returned by the mocked function to behave like a dictionary, like an object, like a function, like a jsonable dict... like almost everything you may need Encapsulates mocking calls so it's easier to load data and mock methods Example: ```python >>> import requests >>> smocked = SMock("tests/data.yaml") >>> requests.get = smocked.mock_method("requests.get") >>> res = requests.get("https://mocked.url") >>> res.status_code 200 >>> res.content 'Gocha!' >>> res.json() {'text': 'Gocha too!'} ``` The mock file is a yaml file with each mocked function as keys, and `args`/`kwargs` as calling args and kwargs, and `response` the response. Check `tests/data.yaml` for an example at the source code. Checked for params on a mocked function is as expected It is necesary as sometimes we get a tuple and at the mock data we have lists. Examples: ``` >>> mock_match("A", "A") True >>> mock_match("A", "B") False >>> mock_match(["A", "B", "C"], ["A", "B", "C"]) True >>> mock_match(["A", "B", "C"], "*") True ``` Returns a function that mocks an original function. Calls `mock_method` Returns an async function that mocks an original async function Given a name, data and call parameters, returns the mocked response If there is no matching response, raises an exception that can be used to prepare the mock data. This can be used for situations where you mock some function like data; for example at [Serverboards](https://serverboards.io), we use it to mock RPC calls. Its also used internally on every other mocking. Calls `mock_res` Mock by args: ``` >>> smock = SMock("tests/data.yaml") >>> res = smock.mock_res("requests.get", ["https://mocked.url"]) >>> res.status_code 200 ``` Using "*" as args, as fallback. As there is no kwargs, use default: ``` >>> res = smock.mock_res("requests.get", ["https://error.mocked.url"]) >>> res.status_code 404 ``` Using "*" as kwargs: ``` >>> res = smock.mock_res("requests.get", ... ["https://mocked.url"], ... {'data': 'data'}) >>> res.status_code 200 >>> res.content 'Mocked query' ``` always match
2,088
en
0.738899
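The real tests/data.yaml is not part of this dump; a hypothetical mock file consistent with mock_res's matching rules, plus a usage sketch calling the module-level mock_res defined in the file above (URLs and values are made up):

import yaml

# hypothetical mock data: the first matching entry wins, so the "*" fallback goes last
example_yaml = """
requests.get:
  - args: ["https://mocked.url"]
    response:
      status_code: 200
      content: "Gocha!"
  - args: "*"
    response:
      status_code: 404
"""
data = yaml.safe_load(example_yaml)

res = mock_res("requests.get", data, ["https://mocked.url"])
print(res.status_code)  # -> 200
res = mock_res("requests.get", data, ["https://anything.else"])
print(res.status_code)  # -> 404 via the "*" fallback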
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 13:27:33 2020

@author: Jin Dou
"""
import torch


def buildDataLoader(*tensors, TorchDataSetType, oSamplerType=None, **Args):
    """Build a DataLoader from tensors, with optional 'DatasetArgs',
    'DataLoaderArgs' and 'SamplerArgs' dicts passed through **Args."""
    if Args.get('DatasetArgs') is not None:
        DataSetArgs = Args['DatasetArgs']
        dataset = TorchDataSetType(*tensors, **DataSetArgs)
    else:
        dataset = TorchDataSetType(*tensors)

    if Args.get('DataLoaderArgs') is not None:
        DataLoaderArgs = Args['DataLoaderArgs']
        if oSamplerType is None or Args.get('SamplerArgs') is None:
            dataLoader = torch.utils.data.DataLoader(dataset, **DataLoaderArgs)
        else:
            SamplerArgs = Args.get('SamplerArgs')
            oSampler = oSamplerType(dataset, **SamplerArgs)
            dataLoader = torch.utils.data.DataLoader(dataset, sampler=oSampler, **DataLoaderArgs)
    else:
        dataLoader = torch.utils.data.DataLoader(dataset)
    return dataLoader


class CPytorch:

    def __init__(self):
        self.Lib = self._ImportTorch()

    def _ImportTorch(self):
        import torch as root
        return root

    def _getNNAttr(self, name: str):
        import torch.nn as NN
        ans = getattr(NN, name)
        return ans


class CTorchNNYaml(CPytorch):

    def __init__(self):
        super().__init__()

    def _readYaml(self, filePath):
        import yaml
        ans = None
        with open(filePath, 'r') as stream:
            try:
                ans = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        return ans

    def _ParseType(self, conf: dict):
        if conf['Type'] == 'Sequential':
            return self.buildSequential(conf)

    def _subListToTuple(self, oInput):
        if isinstance(oInput, dict):
            for key in oInput:
                if isinstance(oInput[key], list):
                    oInput[key] = tuple(oInput[key])
        elif isinstance(oInput, list):
            for idx, attr in enumerate(oInput):
                if isinstance(attr, list):
                    oInput[idx] = tuple(attr)
        else:
            raise ValueError("_subListToTuple: input should be dict or list")

    def buildSequential(self, conf: dict):
        oSeq = self.Lib.nn.Sequential()
        ModelConfList = conf['Model']
        for idx, ModelConf in enumerate(ModelConfList):
            CModule = self._getNNAttr(ModelConf[0])
            attr = ModelConf[1]
            oModule = None
            name = str(idx)
            if len(ModelConf) > 2 and isinstance(ModelConf[2], dict):
                # the entry contains an aux attribute
                auxAttr = ModelConf[2]
                if auxAttr.get('name') is not None:
                    # the aux attribute contains a custom module name
                    name = auxAttr['name']
            if isinstance(attr, list):
                if len(attr) == 0:
                    oModule = CModule()
                elif len(attr) == 2 and isinstance(attr[0], list) and isinstance(attr[1], dict):
                    # the len(attr) guard avoids an IndexError for one-element lists
                    self._subListToTuple(attr[0])
                    self._subListToTuple(attr[1])
                    oModule = CModule(*attr[0], **attr[1])
                elif any(type(x) not in [int, float, str, bool, list] for x in attr):
                    raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0], idx))
                else:
                    self._subListToTuple(attr)
                    oModule = CModule(*attr)
            elif isinstance(attr, dict):
                self._subListToTuple(attr)
                oModule = CModule(**attr)
            else:
                raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0], idx))
            oSeq.add_module(name, oModule)
        return oSeq

    def __call__(self, confFile: str):
        yamlDict = self._readYaml(confFile)
        return self._ParseType(yamlDict)
StimRespFlow/DataProcessing/DeepLearning/Factory.py
3,996
Created on Tue Apr 14 13:27:33 2020 @author: Jin Dou -*- coding: utf-8 -*-
77
en
0.874943
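A hypothetical model spec that _ParseType/buildSequential above would accept; each Model entry is [nn module name, args, optional {'name': ...}], and the layer choices here are arbitrary:

config_yaml = """
Type: Sequential
Model:
  - [Linear, [128, 64]]    # positional args -> nn.Linear(128, 64)
  - [ReLU, []]             # empty arg list  -> nn.ReLU()
  - [Linear, {in_features: 64, out_features: 10}, {name: head}]  # kwargs + custom name
"""
# usage sketch, assuming the spec is saved to model.yaml:
#   factory = CTorchNNYaml()
#   model = factory('model.yaml')  # -> torch.nn.Sequential with modules '0', '1', 'head'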
""" scaffoldgraph tests.core.test_fragment """ import pytest from rdkit import Chem from scaffoldgraph.core.fragment import * @pytest.fixture(name='mol') def test_molecule(): smiles = 'CCN1CCc2c(C1)sc(NC(=O)Nc3ccc(Cl)cc3)c2C#N' return Chem.MolFromSmiles(smiles) def canon(smiles): """Canonicalize SMILES for safety. If canonicalization ever changes this should remain consistent""" return Chem.MolToSmiles(Chem.MolFromSmiles(smiles)) def test_murcko(mol): murcko = get_murcko_scaffold(mol, generic=False) assert Chem.MolToSmiles(murcko) == canon('O=C(Nc1ccccc1)Nc1cc2c(s1)CNCC2') murcko = get_murcko_scaffold(mol, generic=True) assert Chem.MolToSmiles(murcko) == canon('CC(CC1CCCCC1)CC1CC2CCCCC2C1') def test_annotation(mol): annotation = Chem.MolToSmiles(get_annotated_murcko_scaffold(mol)) annotation = annotation.replace('1*', '*') annotation = annotation.replace('2*', '*') annotation = annotation.replace('3*', '*') assert annotation.count('*') == 3 def test_murcko_all(mol): frags = get_all_murcko_fragments(mol, break_fused_rings=True) assert len(frags) == 6 frags = get_all_murcko_fragments(mol, break_fused_rings=False) assert len(frags) == 3 def test_murcko_next(mol): scf = get_murcko_scaffold(mol) frags_1 = get_next_murcko_fragments(scf, break_fused_rings=True) frags_1 = {Chem.MolToSmiles(x) for x in frags_1} assert len(frags_1) == 2 frags_2 = get_next_murcko_fragments(scf, break_fused_rings=False) frags_2 = {Chem.MolToSmiles(x) for x in frags_2} assert len(frags_2) == 2 assert len(frags_1.intersection(frags_2)) == 1 def test_collect_linker_atoms(): mol = Chem.MolFromSmiles('CCCCCCCCCc1ccccc1') remove_atoms = set() a = collect_linker_atoms(mol.GetAtomWithIdx(0), remove_atoms, True) assert len(a) == 1 assert len(remove_atoms) == 9 remove_atoms.clear() a = collect_linker_atoms(mol.GetAtomWithIdx(0), remove_atoms, False) assert len(a) == 1 assert len(remove_atoms) == 8
tests/core/test_fragment.py
2,044
Canonicalize SMILES for safety. If canonicalization ever changes this should remain consistent scaffoldgraph tests.core.test_fragment
133
en
0.760184
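For context, the scaffoldgraph helpers exercised above build on RDKit's own Murcko implementation; a minimal sketch using plain RDKit (not scaffoldgraph) on the same test molecule:

from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold

mol = Chem.MolFromSmiles('CCN1CCc2c(C1)sc(NC(=O)Nc3ccc(Cl)cc3)c2C#N')
scaffold = MurckoScaffold.GetScaffoldForMol(mol)        # ring systems + linkers, side chains removed
print(Chem.MolToSmiles(scaffold))
generic = MurckoScaffold.MakeScaffoldGeneric(scaffold)  # all atoms -> C, all bonds -> single
print(Chem.MolToSmiles(generic))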
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock from oslo_utils.secretutils import md5 from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.kioxia import entities from cinder.volume.drivers.kioxia import kumoscale as kioxia from cinder.volume.drivers.kioxia import rest_client VOL_BACKEND_NAME = 'kioxia_kumoscale_1' VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2' VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' VOL_SIZE = 10 VOL_PROTOCOL = 'NVMeoF' SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77' CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da' CONN_HOST_NAME = 'devstack' CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \ 'beaae2de-3a97-4be1-a739-6ac4bc5bf138' success_prov_response = entities.ProvisionerResponse(None, None, "Success", "Success") fail_prov_response = entities.ProvisionerResponse(None, None, "Failure", "Failure") prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1') prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2') prov_location1 = entities.Location(VOL_UUID, prov_backend1) prov_location2 = entities.Location(VOL_UUID, prov_backend2) prov_volume = entities.VolumeProv(VOL_UUID, None, None, None, None, None, None, None, None, None, None, True, None, [prov_location1, prov_location2]) prov_volumes_response = entities.ProvisionerResponse([prov_volume]) no_entities_prov_response = entities.ProvisionerResponse([], None, "Success") class KioxiaVolumeTestCase(test.TestCase): @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') @mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale') def setUp(self, mock_kumoscale, mock_get_info): mock_get_info.return_value = success_prov_response mock_kumoscale.return_value = \ rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token') super(KioxiaVolumeTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = VOL_BACKEND_NAME self.cfg.url = 'dummyURL' self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE' self.cfg.cafile = 'dummy' self.cfg.num_replicas = 1 self.cfg.block_size = 512 self.cfg.max_iops_per_gb = 1000 self.cfg.desired_iops_per_gb = 1000 self.cfg.max_bw_per_gb = 1000 self.cfg.desired_bw_per_gb = 1000 self.cfg.same_rack_allowed = False self.cfg.max_replica_down_time = 5 self.cfg.span_allowed = True self.cfg.vol_reserved_space_percentage = 20 self.cfg.provisioning_type = 'THIN' self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg) self.driver.configuration.get = lambda *args, **kwargs: {} self.driver.num_replicas = 2 self.expected_stats = { 'volume_backend_name': VOL_BACKEND_NAME, 'vendor_name': 'KIOXIA', 'driver_version': self.driver.VERSION, 'storage_protocol': 'NVMeOF', 'consistencygroup_support': False, 'thin_provisioning_support': True, 'multiattach': False, 'total_capacity_gb': 1000, 'free_capacity_gb': 600 } @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') def test_get_kumoscale(self, mock_get_info): 
mock_get_info.return_value = success_prov_response result = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token', 'cert') self.assertEqual(result.mgmt_ips, ['1.2.3.4']) self.assertEqual(result.port, '8090') self.assertEqual(result.token, 'token') @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_success(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = success_prov_response result = self.driver.create_volume(testvol) args, kwargs = mock_create_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name'][:27]) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.uuid, testvol['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_failure(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_exception(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_success(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = success_prov_response result = self.driver.delete_volume(testvol) mock_delete_volume.assert_any_call(testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_failure(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_exception(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target1 = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target1]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) result = self.driver.initialize_connection(testvol, testconn) mock_host_probe.assert_any_call(testconn['nqn'], testconn['uuid'], testconn['host'], 'Agent', 'cinder-driver-0.1', 30) 
mock_publish.assert_any_call(testconn['uuid'], testvol['id']) mock_get_volumes_by_uuid.assert_any_call(testvol['id']) mock_get_targets.assert_any_call(testconn['uuid'], testvol['id']) mock_get_backend_by_id.assert_any_call('dummy-pid-1') expected_replica = {'portals': [('1.2.3.4', '4420', 'TCP')], 'target_nqn': 'target.nqn', 'vol_uuid': testvol['id']} expected_data = { 'vol_uuid': testvol['id'], 'alias': testvol['name'], 'writable': True, 'volume_replicas': [expected_replica] } expected_result = { 'driver_volume_type': 'nvmeof', 'data': expected_data } self.assertDictEqual(result, expected_result) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = fail_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_exception( self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.side_effect = Exception() mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = 
PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = fail_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.side_effect = Exception() mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = fail_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_volumes(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) 
prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = no_entities_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.side_effect = Exception() mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = fail_prov_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_targets(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = no_entities_prov_response mock_get_backend_by_id.return_value 
= \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.side_effect = Exception() mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_backend(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = no_entities_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') 
@mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = success_prov_response result = self.driver.terminate_connection(testvol, testconn) mock_unpublish.assert_any_call(testconn['uuid'], testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_failure(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_exception(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats(self, mock_get_tenants): tenant = TenantEntity(1000, 400) mock_get_tenants.return_value = entities.ProvisionerResponse([tenant]) result = self.driver.get_volume_stats(True) mock_get_tenants.assert_any_call() self.assertDictEqual(result, self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_failure(self, mock_get_tenants): mock_get_tenants.return_value = fail_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_no_tenants(self, mock_get_tenants): mock_get_tenants.return_value = no_entities_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_exception(self, mock_get_tenants): mock_get_tenants.side_effect = Exception() self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_success(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = success_prov_response result = self.driver.create_snapshot(testsnap) args, kwargs 
= mock_create_snapshot.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testsnap['name']) self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_failure(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_exception(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_success(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = success_prov_response result = self.driver.delete_snapshot(testsnap) mock_delete_snapshot.assert_any_call(testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_failure(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_exception(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_success(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = success_prov_response result = self.driver.create_volume_from_snapshot(testvol, testsnap) args, kwargs = mock_create_snapshot_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_failure(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_exception( self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_success(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = success_prov_response new_size = VOL_SIZE + 2 result = self.driver.extend_volume(testvol, new_size) 
mock_expand_volume.assert_any_call(new_size, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_failure(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = fail_prov_response new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_exception(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.side_effect = Exception() new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_success(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = success_prov_response result = self.driver.create_cloned_volume(testvol, testvol) args, kwargs = mock_clone_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.volumeId, testvol['id']) self.assertEqual(mock_call.sourceVolumeId, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_failure(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_exception(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) def test_convert_host_name(self): name = 'ks-node3-000c2960a794-000c2960a797' result = self.driver._convert_host_name(name) expected = md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() self.assertEqual(result, expected) def test_create_export(self): result = self.driver.create_export(None, None, None) self.assertIsNone(result) def test_ensure_export(self): result = self.driver.ensure_export(None, None) self.assertIsNone(result) def test_remove_export(self): result = self.driver.remove_export(None, None) self.assertIsNone(result) def test_check_for_setup_error(self): result = self.driver.check_for_setup_error() self.assertIsNone(result) def _stub_volume(*args, **kwargs): volume = {'id': kwargs.get('id', VOL_UUID), 'name': kwargs.get('name', VOL_NAME), 'project_id': "test-project", 'display_name': kwargs.get('display_name', VOL_NAME), 'size': kwargs.get('size', VOL_SIZE), 'provider_location': kwargs.get('provider_location', None), 'volume_type_id': kwargs.get('volume_type_id', None)} return volume def _stub_connector(*args, **kwargs): connector = {'uuid': kwargs.get('uuid', CONN_UUID), 'nqn': kwargs.get('nqn', CONN_NQN), 'host': kwargs.get('host', CONN_HOST_NAME)} return connector def _stub_snapshot(*args, **kwargs): volume = {'id': kwargs.get('id', SNAP_UUID), 'name': kwargs.get('name', 'snap2000'), 'volume_id': kwargs.get('id', VOL_UUID)} return volume class TenantEntity: def __init__(self, capacity, consumed): self.tenantId = '0' self.capacity = capacity self.consumedCapacity = consumed class TargetEntity: def __init__(self, name, backend): self.targetName = name self.backend = 
backend class BackendEntity: def __init__(self, portals): self.portals = portals class PortalEntity: def __init__(self, ip, port, transport): self.ip = ip self.port = port self.transport = transport if __name__ == '__main__': unittest.main()
cinder/tests/unit/volume/drivers/test_kioxia.py
40,143
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
546
en
0.872906
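One detail worth calling out in the tests above: stacked @mock.patch.object decorators inject mocks bottom-up, so the decorator nearest the method becomes the first mock argument (hence mock_host_probe first and mock_get_backend_by_id last). A minimal sketch of that ordering rule with throwaway stand-in classes:

import unittest
from unittest import mock


class Service:
    def probe(self):
        return 'real probe'

    def publish(self):
        return 'real publish'


class OrderingDemo(unittest.TestCase):
    @mock.patch.object(Service, 'publish')  # outermost decorator -> last argument
    @mock.patch.object(Service, 'probe')    # innermost decorator -> first argument
    def test_order(self, mock_probe, mock_publish):
        self.assertIsNot(mock_probe, mock_publish)
        mock_probe.return_value = 'mocked probe'
        self.assertEqual(Service().probe(), 'mocked probe')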
# Generated by Django 3.1 on 2020-09-28 07:06 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('base', '0065_bugtracker'), ] operations = [ migrations.RenameField( model_name='bugtracker', old_name='bug', new_name='subject', ), migrations.RenameField( model_name='bugtracker', old_name='user_device_info', new_name='user_device_information', ), migrations.AddField( model_name='bugtracker', name='project', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.project'), ), migrations.AlterField( model_name='bugtracker', name='bug_severity', field=models.CharField(choices=[('Low', 'Low'), ('Minor', 'Minor'), ('Major', 'Major'), ('Critical', 'Critical'), ('Not a bug', 'Not a bug')], default=None, max_length=10), ), ]
base/migrations/0066_auto_20200928_0706.py
1,076
Generated by Django 3.1 on 2020-09-28 07:06
43
en
0.664026
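The model this migration implies is not included in the dump; a hypothetical reconstruction of the BugTracker fields it touches (field names, choices, and the project/bug_severity definitions come from the operations above, the remaining field types are assumed):

from django.db import models

class BugTracker(models.Model):
    SEVERITY_CHOICES = [
        ('Low', 'Low'), ('Minor', 'Minor'), ('Major', 'Major'),
        ('Critical', 'Critical'), ('Not a bug', 'Not a bug'),
    ]
    subject = models.TextField()                  # renamed from 'bug'; type assumed
    user_device_information = models.TextField()  # renamed from 'user_device_info'; type assumed
    project = models.ForeignKey('base.Project', null=True, on_delete=models.CASCADE)
    bug_severity = models.CharField(max_length=10, choices=SEVERITY_CHOICES, default=None)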
"""Top-level package for pomdp-belief-tracking.""" __author__ = """sammie katt""" __email__ = "sammie.katt@gmail.com" __version__ = "0.1.0" from pomdp_belief_tracking import pf
pomdp_belief_tracking/__init__.py
179
Top-level package for pomdp-belief-tracking.
44
en
0.628961
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This package contains the behaviour of a generic seller AEA.""" from typing import cast from aea.skills.behaviours import TickerBehaviour from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage from packages.fetchai.protocols.oef_search.message import OefSearchMessage from packages.fetchai.skills.generic_seller.dialogues import ( LedgerApiDialogues, OefSearchDialogues, ) from packages.fetchai.skills.generic_seller.strategy import GenericStrategy DEFAULT_SERVICES_INTERVAL = 60.0 LEDGER_API_ADDRESS = "fetchai/ledger:0.3.0" class GenericServiceRegistrationBehaviour(TickerBehaviour): """This class implements a behaviour.""" def __init__(self, **kwargs): """Initialise the behaviour.""" services_interval = kwargs.pop( "services_interval", DEFAULT_SERVICES_INTERVAL ) # type: int super().__init__(tick_interval=services_interval, **kwargs) def setup(self) -> None: """ Implement the setup. :return: None """ strategy = cast(GenericStrategy, self.context.strategy) if strategy.is_ledger_tx: ledger_api_dialogues = cast( LedgerApiDialogues, self.context.ledger_api_dialogues ) ledger_api_msg = LedgerApiMessage( performative=LedgerApiMessage.Performative.GET_BALANCE, dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(), ledger_id=strategy.ledger_id, address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)), ) ledger_api_msg.counterparty = LEDGER_API_ADDRESS ledger_api_dialogues.update(ledger_api_msg) self.context.outbox.put_message(message=ledger_api_msg) self._register_agent() self._register_service() def act(self) -> None: """ Implement the act. :return: None """ # self._unregister_service() # self._register_service() def teardown(self) -> None: """ Implement the task teardown. :return: None """ self._unregister_service() self._unregister_agent() def _register_agent(self) -> None: """ Register the agent's location. :return: None """ strategy = cast(GenericStrategy, self.context.strategy) description = strategy.get_location_description() oef_search_dialogues = cast( OefSearchDialogues, self.context.oef_search_dialogues ) oef_search_msg = OefSearchMessage( performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description, ) oef_search_msg.counterparty = self.context.search_service_address oef_search_dialogues.update(oef_search_msg) self.context.outbox.put_message(message=oef_search_msg) self.context.logger.info("registering agent on SOEF.") def _register_service(self) -> None: """ Register the agent's service. 
:return: None """ strategy = cast(GenericStrategy, self.context.strategy) description = strategy.get_register_service_description() oef_search_dialogues = cast( OefSearchDialogues, self.context.oef_search_dialogues ) oef_search_msg = OefSearchMessage( performative=OefSearchMessage.Performative.REGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description, ) oef_search_msg.counterparty = self.context.search_service_address oef_search_dialogues.update(oef_search_msg) self.context.outbox.put_message(message=oef_search_msg) self.context.logger.info("registering service on SOEF.") def _unregister_service(self) -> None: """ Unregister service from the SOEF. :return: None """ strategy = cast(GenericStrategy, self.context.strategy) description = strategy.get_unregister_service_description() oef_search_dialogues = cast( OefSearchDialogues, self.context.oef_search_dialogues ) oef_search_msg = OefSearchMessage( performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description, ) oef_search_msg.counterparty = self.context.search_service_address oef_search_dialogues.update(oef_search_msg) self.context.outbox.put_message(message=oef_search_msg) self.context.logger.info("unregistering service from SOEF.") def _unregister_agent(self) -> None: """ Unregister agent from the SOEF. :return: None """ strategy = cast(GenericStrategy, self.context.strategy) description = strategy.get_location_description() oef_search_dialogues = cast( OefSearchDialogues, self.context.oef_search_dialogues ) oef_search_msg = OefSearchMessage( performative=OefSearchMessage.Performative.UNREGISTER_SERVICE, dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(), service_description=description, ) oef_search_msg.counterparty = self.context.search_service_address oef_search_dialogues.update(oef_search_msg) self.context.outbox.put_message(message=oef_search_msg) self.context.logger.info("unregistering agent from SOEF.")
packages/fetchai/skills/generic_seller/behaviours.py
6,589
This class implements a behaviour. Initialise the behaviour. Register the agent's location. :return: None Register the agent's service. :return: None Unregister agent from the SOEF. :return: None Unregister service from the SOEF. :return: None Implement the act. :return: None Implement the setup. :return: None Implement the task teardown. :return: None This package contains the behaviour of a generic seller AEA. -*- coding: utf-8 -*- ------------------------------------------------------------------------------ Copyright 2018-2019 Fetch.AI Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ------------------------------------------------------------------------------ type: int self._unregister_service() self._register_service()
1,245
en
0.692309
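The kwargs.pop in the behaviour's constructor above is the usual pattern for consuming a skill-configured argument before forwarding the rest to the base class; a toy sketch of the mechanics with stand-in classes (not the real aea framework):

class TickerBehaviourSketch:
    """Stand-in for aea.skills.behaviours.TickerBehaviour."""
    def __init__(self, tick_interval: float = 1.0, **kwargs):
        self.tick_interval = tick_interval

class RegistrationSketch(TickerBehaviourSketch):
    def __init__(self, **kwargs):
        # pop so 'services_interval' is not forwarded to the base class twice
        services_interval = kwargs.pop("services_interval", 60.0)
        super().__init__(tick_interval=services_interval, **kwargs)

b = RegistrationSketch(services_interval=30.0)
assert b.tick_interval == 30.0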
"""Convert MUSDB18 dataset to .wav format. Output .wav files contain 5 channels - `0` - The mixture, - `1` - The drums, - `2` - The bass, - `3` - The rest of the accompaniment, - `4` - The vocals. """ import argparse import os import subprocess import tempfile import librosa import numpy as np import soundfile as sf def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('origin_dataset_dir', help='Path of the original dataset (.mp4)', type=str) parser.add_argument('new_dataset_dir', help='Output path of .wav dataset', type=str) parser.add_argument('--sr', help='Sample rate. (Default: 22050) ', type=int, default=22050) args = parser.parse_args() origin_dataset_dir = args.origin_dataset_dir new_dataset_dir = args.new_dataset_dir if os.path.isdir(new_dataset_dir): raise FileExistsError(f'{new_dataset_dir} already exists.') else: os.mkdir(new_dataset_dir) os.mkdir(os.path.join(new_dataset_dir, 'train')) os.mkdir(os.path.join(new_dataset_dir, 'test')) with tempfile.TemporaryDirectory() as tmpdir: for subdir in ('train', 'test'): origin_dir = os.path.join(origin_dataset_dir, subdir) files = [f for f in os.listdir(origin_dir) if os.path.splitext(f)[1] == '.mp4'] for file in files: path = os.path.join(origin_dir, file) name = os.path.splitext(file)[0] wav_data = [] # Extract & save the sound of `ch` channel to a temp directory # and then concatenate all channels to a single .wav file for ch in range(5): temp_fn = f'{name}.{ch}.wav' out_path = os.path.join(tmpdir, temp_fn) subprocess.run(['ffmpeg', '-i', path, '-map', f'0:{ch}', out_path]) sound, _ = librosa.load(out_path, sr=args.sr, mono=True) wav_data.append(sound) wav_data = np.stack(wav_data, axis=1) out_path = os.path.join( new_dataset_dir, subdir, f'{name}.wav') sf.write(out_path, wav_data, args.sr) if __name__ == '__main__': main()
src/convert_to_wav.py
2,429
Convert MUSDB18 dataset to .wav format. Output .wav files contain 5 channels - `0` - The mixture, - `1` - The drums, - `2` - The bass, - `3` - The rest of the accompaniment, - `4` - The vocals. Extract & save the sound of `ch` channel to a temp directory and then concatenate all channels to a single .wav file
312
en
0.728448
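A short sketch of consuming the converter's output, assuming the script has been run; the track path below is a placeholder, not a real file:

    import soundfile as sf

    # Placeholder path for one converted track.
    data, sr = sf.read('musdb18_wav/train/track.wav')
    assert data.shape[1] == 5
    # Channel layout follows the docstring above.
    mixture, drums, bass, accompaniment, vocals = data.T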
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# RobotPy WPILib documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 2 21:31:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

#
# Imports
#

import sys
import os
from os.path import abspath, join, dirname

sys.path.insert(0, abspath(join(dirname(__file__))))

# -- RTD configuration ------------------------------------------------

# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"

# This is used for linking and such so we link to the thing we're building
rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
if rtd_version not in ["stable", "latest"]:
    rtd_version = "stable"

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx_inline_tabs",
    "sphinxext.opengraph",
    "sphinx_reredirects",
]

ogp_custom_meta_tags = [
    '<meta property="og:ignore_canonical" content="true" />',
    '<meta name="theme-color" content="#3393d5" />',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "RobotPy"
copyright = "2014-2020, RobotPy development team"

intersphinx_mapping = {
    "commandsv1": (
        "https://robotpy.readthedocs.io/projects/commands-v1/en/%s/" % rtd_version,
        None,
    ),
    "commandsv2": (
        "https://robotpy.readthedocs.io/projects/commands-v2/en/%s/" % rtd_version,
        None,
    ),
    "pyfrc": (
        "https://robotpy.readthedocs.io/projects/pyfrc/en/%s/" % rtd_version,
        None,
    ),
    "networktables": (
        "https://robotpy.readthedocs.io/projects/pynetworktables/en/%s/" % rtd_version,
        None,
    ),
    "wpilib": (
        "https://robotpy.readthedocs.io/projects/wpilib/en/%s/" % rtd_version,
        None,
    ),
    "hal": (
        "https://robotpy.readthedocs.io/projects/hal/en/%s/" % rtd_version,
        None,
    ),
    "robotpy_ext": (
        "https://robotpy.readthedocs.io/projects/utilities/en/%s/" % rtd_version,
        None,
    ),
    "cscore": (
        "https://robotpy.readthedocs.io/projects/cscore/en/%s/" % rtd_version,
        None,
    ),
    "frc": ("https://docs.wpilib.org/en/stable", None),
}

redirects = {"2020_notes": "upgrade_notes.html"}

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "2021"
# The full version, including alpha/beta/rc tags.
release = version

autoclass_content = "both"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "default"

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme

    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = "default"

# Output file base name for HTML help builder.
htmlhelp_basename = "RobotPy"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        "index",
        "RobotPy.tex",
        "RobotPy Documentation",
        "RobotPy development team",
        "manual",
    )
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "RobotPy",
        "RobotPy Documentation",
        "RobotPy development team",
        "RobotPy",
        "One line description of project.",
        "Miscellaneous",
    )
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = "RobotPy"
epub_author = "RobotPy development team"
epub_publisher = "RobotPy development team"
epub_copyright = "2014-2020, RobotPy development team"

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# -- Custom Document processing ----------------------------------------------

from robotpy_sphinx.sidebar import generate_sidebar

generate_sidebar(
    globals(),
    "robotpy",
    "https://raw.githubusercontent.com/robotpy/docs-sidebar/master/sidebar.toml",
)
conf.py
5,644
!/usr/bin/env python3 -*- coding: utf-8 -*- RobotPy WPILib documentation build configuration file, created by sphinx-quickstart on Sun Nov 2 21:31:04 2014. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. Imports -- RTD configuration ------------------------------------------------ on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org This is used for linking and such so we link to the thing we're building -- General configuration ------------------------------------------------ Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. only import and set the theme if we're building docs locally Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ---------------------------------------------- Bibliographic Dublin Core info. A list of files that should not be packed into the epub file. -- Custom Document processing ----------------------------------------------
2,456
en
0.693206
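A small standalone sketch of the RTD version pinning used in the config above, with illustrative values, showing why cross-project links never point at an arbitrary branch build:

    import os

    # On a feature-branch build, READTHEDOCS_VERSION is the branch name,
    # so the fallback pins links to 'stable' instead.
    rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest")
    if rtd_version not in ["stable", "latest"]:
        rtd_version = "stable"
    url = "https://robotpy.readthedocs.io/projects/wpilib/en/%s/" % rtd_version
    # e.g. 'https://robotpy.readthedocs.io/projects/wpilib/en/stable/'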
# Space: O(k * C(n, k)) -- every combination is precomputed and stored
# Time:  O(k * C(n, k)) to build; next() and hasNext() are O(1) afterwards


class CombinationIterator:

    def __init__(self, characters: str, combinationLength: int):
        self.data = characters
        # Precompute all combinations in lexicographic order via DFS.
        self.res = self.combine(self.data, combinationLength)
        self.counter = 0
        self.res_count = len(self.res)

    def next(self) -> str:
        if self.hasNext():
            res = self.res[self.counter]
            self.counter += 1
            return res

    def hasNext(self) -> bool:
        return self.counter < self.res_count

    def combine(self, data, length):
        if length > len(data):
            return []

        def dfs(data, index, temp_res, res, length):
            if len(temp_res) == length:
                res.append(temp_res)
                return
            for i in range(index, len(data)):
                temp_res += data[i]
                dfs(data, i + 1, temp_res, res, length)
                temp_res = temp_res[:-1]  # backtrack
            return res

        return dfs(data, 0, '', [], length)
Algorithms/1286_Iterator_for_Combination/Python/Iterator_for_Combination_Solution_1.py
992
Space: O(n) Time: O(n!)
23
en
0.292249
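Usage sketch (values illustrative):

    it = CombinationIterator("abc", 2)
    out = []
    while it.hasNext():
        out.append(it.next())
    print(out)  # ['ab', 'ac', 'bc'] -- lexicographic order of the input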
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding: utf-8
"""Setup xlearn package."""
from __future__ import absolute_import
import sys
import os
from setuptools import setup, find_packages

sys.path.insert(0, '.')

CURRENT_DIR = os.path.dirname(__file__)

# Execute xlearn/libpath.py in its own namespace so its find_lib_path()
# can locate the compiled shared library to bundle.
libpath_py = os.path.join(CURRENT_DIR, 'xlearn/libpath.py')
libpath = {'__file__': libpath_py}
exec(compile(open(libpath_py, "rb").read(), libpath_py, 'exec'), libpath, libpath)

LIB_PATH = [os.path.relpath(libfile, CURRENT_DIR)
            for libfile in libpath['find_lib_path']()]
print("Install libxlearn_api from: %s" % LIB_PATH)

setup(name='xlearn',
      version=open(os.path.join(CURRENT_DIR, 'xlearn/VERSION')).read().strip(),
      description="xLearn Python Package",
      maintainer='Chao Ma',
      maintainer_email='mctt90@gmail.com',
      zip_safe=False,
      packages=find_packages(),
      # this will use MANIFEST.in during install where we specify additional files,
      # this is the golden line
      include_package_data=True,
      install_requires=[
          "numpy",
          "scipy"
      ],
      data_files=[('xlearn', LIB_PATH)],
      license='Apache-2.0',
      classifiers=['License :: OSI Approved :: Apache Software License'],
      url='https://github.com/aksnzhy/xlearn')
python-package/setup.py
1,827
Setup xlearn package. Copyright (c) 2018 by contributors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. coding: utf-8 this will use MANIFEST.in during install where we specify additional files, this is the golden line
716
en
0.879126
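A standalone sketch of the exec-based libpath trick used in the setup.py above, assuming xlearn/libpath.py defines find_lib_path() as in the real package; paths here are illustrative:

    # Run libpath.py in a fresh namespace and pull out find_lib_path().
    namespace = {'__file__': 'xlearn/libpath.py'}
    exec(compile(open('xlearn/libpath.py', 'rb').read(),
                 'xlearn/libpath.py', 'exec'), namespace, namespace)
    libs = namespace['find_lib_path']()  # e.g. ['xlearn/libxlearn_api.so']

This avoids importing the xlearn package itself at install time, when the compiled extension may not be importable yet.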
# Copyright (c) 2015 Brian Haskin Jr.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE

import os.path
import socket
import unittest

from pyrimaa import aei, board
from pyrimaa.aei import EngineController, EngineException, EngineResponse


class MockEngine:
    def __init__(self, expected):
        self.log = None
        self.expected = expected
        self.event = 0
        self._closed = False

    def is_running(self):
        return False if self._closed else True

    def send(self, msg):
        if self._closed:
            raise Exception("Mock engine send called after cleanup.")
        expected = self.expected[self.event]
        if expected[0] == "raise":
            self.event += 1
            raise expected[1]
        if expected[0] != "s":
            raise Exception("Mock engine send called when expecting, %s" %
                            (expected, ))
        if msg.rstrip() != expected[1]:
            raise Exception(
                "Mock engine send called with unexpected message (%s) expected (%s)."
                % (msg, expected[1]))
        self.event += 1

    def readline(self, timeout=None):
        if self._closed:
            raise Exception("Mock engine readline called after cleanup.")
        expected = self.expected[self.event]
        if expected[0] != "r":
            raise Exception("Mock engine readline called when expecting, %s" %
                            (expected[1], ))
        self.event += 1
        return expected[1]

    def waitfor(self, msg, timeout=0.5):
        if self._closed:
            raise Exception("Mock engine waitfor called after cleanup.")
        msg = msg.rstrip()
        expected = self.expected[self.event]
        if expected[0] not in ["r", "raise"]:
            raise Exception("Mock engine waitfor called when expecting, %s" %
                            (expected, ))
        responses = []
        while expected[0] == "r" and expected[1] != msg:
            responses.append(expected[1])
            self.event += 1
            expected = self.expected[self.event]
        if expected[0] == "r" and msg == expected[1]:
            responses.append(expected[1])
        elif expected[0] == "send_response":
            pass
        elif expected[0] == "raise":
            self.event += 1
            raise expected[1]()
        else:
            raise Exception(
                "Mock engine waitfor called with unexpected message (%s)" %
                (msg, ))
        self.event += 1
        return responses

    def cleanup(self):
        if self._closed:
            raise Exception("Mock engine cleanup called multiple times.")
        self._closed = True


class MockLog:
    def __init__(self):
        self.debugging = ""
        self.information = ""
        self.warning = ""

    def debug(self, message):
        self.debugging += message + '\n'

    def info(self, message):
        self.information += message + '\n'

    def warn(self, message):
        self.warning += message + '\n'


# NOTE: the empty-square runs inside the setposition strings below were
# collapsed during extraction; they are reconstructed here as 32 spaces so
# each bracketed position is the 64 squares the AEI short format requires.
protocol0 = [
    ("s", "aei"),
    ("r", "id name Mock0"),
    ("r", "id author Janzert"),
    ("r", "aeiok"),
    ("s", "isready"),
    ("r", "readyok"),
    ("s", "newgame"),
    ("s", "setposition w [rrrrrrrrdhcemchd                                DHCMECHDRRRRRRRR]"),
]

bad_protocol = [
    ("s", "aei"),
    ("r", "protocol-version abc"),
    ("r", "id name Mock"),
    ("r", "id author Janzert"),
    ("r", "aeiok"),
    ("s", "isready"),
    ("r", "readyok"),
    ("s", "newgame"),
    ("s", "setposition g [rrrrrrrrdhcemchd                                DHCMECHDRRRRRRRR]"),
    ("s", "go"),
    ("s", "stop"),
    ("s", "quit"),
]

protocol1 = [
    ("s", "aei"),
    ("r", "protocol-version 1"),
    ("r", "id name Mock"),
    ("r", "id author Janzert"),
    ("r", "aeiok"),
    ("s", "isready"),
    ("r", "log Engine running"),
    ("r", "readyok"),
    ("r", ""),
    ("r", "log Engine initialized"),
    ("s", "setoption name depth value 4"),
    ("s", "newgame"),
    ("s", "setposition g [rrrrrrrrdhcemchd                                DHCMECHDRRRRRRRR]"),
    ("s", "go"),
    ("s", "stop"),
    ("r", "info depth 4"),
    ("r", "bestmove Hb2n Ed2n"),
    ("s", "makemove Hb2n Ed2n"),
    ("s", "go ponder"),
    ("s", "quit"),
]

bad_isready_response = [
    ("s", "aei"),
    ("r", "protocol-version 1"),
    ("r", "id name Mock"),
    ("r", "id author Janzert"),
    ("r", "aeiok"),
    ("s", "isready"),
    ("r", "readyok"),
    ("s", "newgame"),
    ("s", "isready"),
    ("r", "log Engine shutting down"),
    ("send_response", ),
]

aeiok_timeout = [
    ("s", "aei"),
    ("raise", socket.timeout),
]

aei_send_error = [
    ("raise", IOError),
]


class EngineControllerTest(unittest.TestCase):
    def test_protocol_versions(self):
        eng = MockEngine(protocol0)
        ctl = EngineController(eng)
        self.assertEqual(ctl.ident["name"], "Mock0")
        self.assertEqual(ctl.ident["author"], "Janzert")
        self.assertEqual(ctl.protocol_version, 0)
        ctl.newgame()
        pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
        ctl.setposition(pos)
        ctl.cleanup()
        # bad protocol version
        eng = MockEngine(bad_protocol)
        eng.log = MockLog()
        ctl = EngineController(eng)
        self.assertIn("Unrecognized protocol version", eng.log.warning)
        pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
        ctl.newgame()
        ctl.setposition(pos)
        ctl.go()
        ctl.stop()
        ctl.quit()

    def test_controller(self):
        eng = MockEngine(protocol1)
        ctl = EngineController(eng)
        self.assertEqual(ctl.ident["name"], "Mock")
        self.assertEqual(ctl.ident["author"], "Janzert")
        self.assertEqual(ctl.protocol_version, 1)
        self.assertEqual(ctl.is_running(), True)
        self.assertRaises(socket.timeout, ctl.get_response)
        resp = ctl.get_response()
        self.assertIsInstance(resp, EngineResponse)
        self.assertEqual(resp.type, "log")
        self.assertEqual(resp.message,
                         eng.expected[eng.event - 1][1].lstrip("log "))
        ctl.setoption("depth", 4)
        ctl.newgame()
        pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
        ctl.setposition(pos)
        ctl.go()
        ctl.stop()
        resp = ctl.get_response()
        self.assertEqual(resp.type, "info")
        self.assertEqual(resp.message,
                         eng.expected[eng.event - 1][1].lstrip("info "))
        resp = ctl.get_response()
        self.assertEqual(resp.type, "bestmove")
        self.assertEqual(resp.move,
                         eng.expected[eng.event - 1][1].lstrip("bestmove "))
        ctl.makemove("Hb2n Ed2n")
        ctl.go("ponder")
        ctl.quit()
        ctl.cleanup()
        # bad response to isready
        eng = MockEngine(bad_isready_response)
        ctl = EngineController(eng)
        ctl.newgame()
        self.assertRaises(EngineException, ctl.isready)
        # timeout waiting for aeiok
        eng = MockEngine(aeiok_timeout)
        self.assertRaises(EngineException, EngineController, eng)
        # IOError sending aei
        eng = MockEngine(aei_send_error)
        self.assertRaises(EngineException, EngineController, eng)

    def _check_engine(self, eng):
        self.assertEqual(eng.is_running(), True)
        eng.send("aei\n")
        response = eng.waitfor("aeiok")
        self.assertEqual(response[-1], "aeiok")
        self.assertRaises(socket.timeout, eng.readline, timeout=0.05)
        eng.send("isready\n")
        response = eng.readline()
        self.assertEqual(response, "readyok")
        eng.send("quit\n")
        eng.waitfor("log")
        self.assertRaises(EngineException, eng.waitfor, "invalid",
                          timeout=0.05)
        eng.cleanup()
        self.assertEqual(eng.active, False)

    def test_stdioengine(self):
        eng = aei.get_engine("stdio", "simple_engine")
        self.assertIsInstance(eng, aei.StdioEngine)
        self._check_engine(eng)
        eng = aei.get_engine("stdio", "simple_engine", "aei")
        self._check_engine(eng)

    def test_socketengine(self):
        path = os.path.dirname(__file__)
        adapter = os.path.join(path, "socketadapter.py")
        eng = aei.get_engine("socket", adapter)
        self.assertIsInstance(eng, aei.SocketEngine)
        self._check_engine(eng)
        eng = aei.get_engine("socket", adapter, "aei")
        self.assertIsInstance(eng, aei.SocketEngine)
        self._check_engine(eng)
        eng = aei.get_engine("2008cc", adapter + " --legacy")
        self._check_engine(eng)
pyrimaa/tests/test_aei.py
9,665
Copyright (c) 2015 Brian Haskin Jr. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE bad protocol version bad response to isready timeout waiting for aeiok IOError sending aei
1,146
en
0.867949
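The MockEngine pattern above generalizes: a scripted handshake is all it takes to stand up an EngineController in a test without a real engine process. A minimal sketch, with illustrative id values; the script must end exactly where the controller's initialization stops consuming events:

    script = [
        ("s", "aei"),
        ("r", "protocol-version 1"),
        ("r", "id name Sketch"),
        ("r", "id author Example"),
        ("r", "aeiok"),
        ("s", "isready"),
        ("r", "readyok"),
    ]
    eng = MockEngine(script)
    ctl = EngineController(eng)      # consumes the whole script
    assert ctl.ident["name"] == "Sketch"
    ctl.cleanup()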