id
int32
0
299
prompt
stringlengths
93
303
prompt_original
stringlengths
21
1.39k
intent
stringlengths
25
2.17k
main_tf
stringlengths
109
8.28k
checks_rego
stringlengths
171
6.45k
plan_json
stringlengths
1.41k
174k
200
I need a spot to keep some files where our team covers the costs, even when others download them.
Create a S3 bucket where the bucket owner pays for fees.
Has an aws_s3_bucket Has an aws_s3_bucket_request_payment_configuration with bucket = bucket id OR name with payer = "BucketOwner"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "example" { bucket = "mybucket" } resource "aws_s3_bucket_request_payment_configuration" "example" { bucket = aws_s3_bucket.exampl...
package aws_s3_bucket_payment_config import future.keywords.in default valid := false valid { # IF THE BUCKET IS REFERRED TO BY ID some bucket in input.configuration.root_module.resources bucket.type == "aws_s3_bucket" some payment_config in input.configuration.root_module.resources p...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_s3_bucket.example","mode":"managed","type":"aws_s3_bucket","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bucket":"mybucket","force_destroy":false,"reg...
201
I need a place to store files where I can keep past revisions around in case we ever need to roll something back.
Create a S3 bucket and an example versioning resource for the S3 bucket.
Has an aws_s3_bucket Has an aws_s3_bucket_versioning with bucket = bucket id OR name
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "example" { bucket = "example-bucket" } resource "aws_s3_bucket_versioning" "versioning_example" { bucket = aws_s3_bucket.example....
package aws_s3_bucket_versioning import future.keywords.in default valid := false valid { # IF THE BUCKET IS REFERRED TO BY ID some bucket in input.configuration.root_module.resources bucket.type == "aws_s3_bucket" some versioning in input.configuration.root_module.resources versioning.type == "aws_s...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_s3_bucket.example","mode":"managed","type":"aws_s3_bucket","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bucket":"example-bucket","force_destroy":fals...
202
I just need a simple place to keep some files online, and I don’t need any built‑in history or automatic backups for changes.
Create a S3 bucket with versioning disabled.
Has an aws_s3_bucket Has an aws_s3_bucket_versioning with bucket = bucket id OR name with versioning_configuration = { status = "Disabled" }
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "example" { bucket = "example-bucket" } resource "aws_s3_bucket_versioning" "versioning_example" { bucket = aws_s3_bucket.example....
package aws_s3_bucket_versioning import future.keywords.in default valid := false valid { # IF THE BUCKET IS REFERRED TO BY ID some bucket in input.configuration.root_module.resources bucket.type == "aws_s3_bucket" some versioning in input.configuration.root_module.resources versioning.type == "aws_s...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_s3_bucket.example","mode":"managed","type":"aws_s3_bucket","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bucket":"example-bucket","force_destroy":fals...
203
I want to keep track of activity in one storage area by having it write its history to another place, and I’d like all those history files grouped under a common label.
Create a S3 bucket and a second S3 bucket. Then create an example logging resource for the first S3 bucket that stores logs in the second bucket. Make sure the log object keys have a prefix of "log/".
Has two aws_s3_bucket Has an aws_s3_bucket_logging with bucket = bucket id OR name with target_bucket = second bucket id OR name with target_prefix = "log/"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "example" { bucket = "mybucket" } resource "aws_s3_bucket" "log_bucket" { bucket = "mylogbucket" } resource "aws_s3_bucket_loggin...
package s3_bucket_logging import future.keywords.in default valid := false valid { some bucket in input.configuration.root_module.resources bucket.type == "aws_s3_bucket" some other_bucket in input.configuration.root_module.resources other_bucket.type == "aws_s3_bucket" not bucket == other_bucket...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_s3_bucket.example","mode":"managed","type":"aws_s3_bucket","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bucket":"mybucket","force_destroy":false,"reg...
204
I need a place to keep some files, and I also want their access history saved somewhere separate so we can review it later without cluttering the main storage.
Create a S3 bucket and a configuration that stores server access logs into a second S3 bucket.
Has two aws_s3_bucket Has an aws_s3_bucket_logging with bucket = bucket id OR name with target_bucket = second bucket id OR name with target_prefix = "log/"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "example" { bucket = "mybucket" } resource "aws_s3_bucket" "log_bucket" { bucket = "mylogbucket" } resource "aws_s3_bucket_loggin...
package s3_bucket_logging import future.keywords.in default valid := false valid { some bucket in input.configuration.root_module.resources bucket.type == "aws_s3_bucket" some other_bucket in input.configuration.root_module.resources other_bucket.type == "aws_s3_bucket" not bucket == other_bucket...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_s3_bucket.example","mode":"managed","type":"aws_s3_bucket","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bucket":"mybucket","force_destroy":false,"reg...
205
I need a setup where students’ code from our class repository can be pulled down, run in a controlled environment, and have the results saved somewhere, and it should all work reliably without much manual involvement.
Infrastructure for an autograder for a CS class. The autograder needs to run students' code stored on GitHub in a specific environment and store the result. Accomplish this using AWS CodeBuild.
have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" argument with "source" argument block with "typ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder_high import rego.v1 codebuild_project_valid(codebuild_project) if { some artifact in codebuild_project.expressions.artifacts artifact.location artifact.name artifact.type some environment in codebuild_project.expressions.environment environment.comput...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
206
I need a setup that can automatically pull students’ code from our class repository, run it in a controlled environment, and save whatever it produces, all while keeping the whole process lightweight and reliable.
Infrastructure for an autograder for a CS class. The autograder needs to run students' code stored on GitHub in a specific environment and store the result. Accomplish this using the following services: AWS S3, AWS CodeBuild. Add any necessary services.
have one "aws_s3_bucket" resource have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with value "S3" with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" arg...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder_middle import rego.v1 codebuild_project_valid(codebuild_project, s3_bucket) if { some artifact in codebuild_project.expressions.artifacts s3_bucket.address in artifact.location.references artifact.name artifact.type.constant_value == "S3" some environment in...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
207
I need a setup that can pull students’ code from our class repository, run it in a controlled environment, and save whatever it outputs, all in a way that’s simple and reliable for the whole semester.
Infrastructure for an autograder for a CS class. The autograder needs to run students' code stored on GitHub in a specific environment and store the result. Accomplish this using the following resources: AWS S3 Bucket, AWS CodeBuild Project. Add any necessary resources.
have one "aws_s3_bucket" resource have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with value "S3" with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" arg...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder_middle import rego.v1 codebuild_project_valid(codebuild_project, s3_bucket) if { some artifact in codebuild_project.expressions.artifacts s3_bucket.address in artifact.location.references artifact.name artifact.type.constant_value == "S3" some environment in...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
208
I need a setup that can take students’ code from our class repository, run it in a controlled environment cut off from the outside world, and save the results somewhere, all without exposing anything to the open internet.
Infrastructure for an autograder for a CS class. The autograde needs to run students' code stored on GitHub in a specific environment and store the result. Make sure that the autograder prevents the students' code from reaching the internet. Accomplish this using AWS CodeBuild and AWS VPC.
have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with value "S3" with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" argument with "source" argument blo...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder_high_jail import rego.v1 codebuild_project_valid(codebuild_project, security_group, subnet, vpc) if { some artifact in codebuild_project.expressions.artifacts artifact.location artifact.type artifact.name some environment in codebuild_project.expressions.environment environment.compute_type ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
209
I need a setup where we can automatically run students’ code in a controlled environment, save whatever it produces, and make sure the code can’t talk to the outside world, all while keeping the whole thing fairly simple to operate.
Infrastructure for an autograder for a CS class. The autograde needs to run students' code stored on GitHub in a specific environment and store the result. Make sure that the autograder prevents the students' code from reaching the internet. Accomplish this using the following services: AWS S3, AWS CodeBuild, AWS VPC. ...
have one "aws_s3_bucket" resource have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with value "S3" with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" arg...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder import rego.v1 codebuild_project_valid(codebuild_project, s3_bucket, security_group, subnet, vpc) if { some artifact in codebuild_project.expressions.artifacts s3_bucket.address in artifact.location.references artifact.type.constant_value == "S3" artifact.name some environment in codebuild_pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
210
I need a setup where students’ code can be fetched, run in a controlled environment that can’t reach the outside world, and then have the results saved somewhere for later review, all while keeping the whole thing simple and secure.
Infrastructure for an autograder for a CS class. The autograde needs to run students' code stored on GitHub in a specific environment and store the result. Make sure that the autograder prevents the students' code from reaching the internet. Accomplish this using the following resources: AWS S3 Bucket, AWS CodeBuild P...
have one "aws_s3_bucket" resource have one "aws_codebuild_project" resource with "artifacts" argument block with "type" argument with value "S3" with "location" argument with "name" argument with "environment" argument block with "computer_type" argument with "image" argument with "type" arg...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "artifact_bucket" { bucket_prefix = "artifact-bucket-" } resource "aws_codebuild_project" "autogra...
package autograder import rego.v1 codebuild_project_valid(codebuild_project, s3_bucket, security_group, subnet, vpc) if { some artifact in codebuild_project.expressions.artifacts s3_bucket.address in artifact.location.references artifact.type.constant_value == "S3" artifact.name some environment in codebuild_pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_codebuild_project.autograder_build","mode":"managed","type":"aws_codebuild_project","name":"autograder_build","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"artifacts":...
211
I need a small always‑on machine in the cloud, and I want its data saved automatically every night without me having to manage that process myself.
An AWS service that provisions an EC2 instance and backs it up every day at midnight. Accomplish this using AWS Backup.
have one "aws_ami" data resource have one "aws_instance" resource with "ami" argument linking to the aws_ami data resource with "instance_type" argument have one "aws_backup_plan" resource with "name" argument with "rule" argument block with "rule_name" argument with "target_vault_name" argument linking...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_ami" "amzn2" { most_recent = true owners = ["amazon"] filter { name = "name" values = ["*ubunt...
package cloud_desktop_high import rego.v1 default valid := false instance_valid(instance, ami) if { instance.expressions.instance_type ami.address in instance.expressions.ami.references } backup_plan_valid(backup_plan, backup_vault) if { # advanced_backup_setting some backup_setting in backup_plan.expressions.a...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_backup_plan.cloud_desktop_backup","mode":"managed","type":"aws_backup_plan","name":"cloud_desktop_backup","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"advanced_backup...
212
I need a small always‑on compute environment for some lightweight tasks, and it should automatically save daily restore points without me having to manage that manually.
An AWS service that provisions an EC2 instance and backs it up every day at midnight. Accomplish this using the following services: AWS EC2, AWS Backup. Add any necessary services.
have one "aws_ami" data resource have one "aws_instance" resource with "ami" argument linking to the aws_ami data resource with "instance_type" argument have one "aws_backup_plan" resource with "name" argument with "rule" argument block with "rule_name" argument with "target_vault_name" argument linking...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_ami" "amzn2" { most_recent = true owners = ["amazon"] filter { name = "name" values = ["*ubunt...
package cloud_desktop_high import rego.v1 default valid := false instance_valid(instance, ami) if { instance.expressions.instance_type ami.address in instance.expressions.ami.references } backup_plan_valid(backup_plan, backup_vault) if { # advanced_backup_setting some backup_setting ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_backup_plan.cloud_desktop_backup","mode":"managed","type":"aws_backup_plan","name":"cloud_desktop_backup","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"advanced_backup...
213
I need a small always‑on workspace in the cloud, and it should automatically save its state every night so we can recover it easily if something goes wrong.
An AWS service that provisions an EC2 instance and backs it up every day at midnight. Accomplish this using the following resources: AWS EC2, AWS EC2 key pair, AWS Backup plan, AWS Backup vault. Add any necessary resources.
have one "aws_ami" data resource have one "aws_instance" resource with "ami" argument linking to the aws_ami data resource with "instance_type" argument have one "aws_backup_plan" resource with "name" argument with "rule" argument block with "rule_name" argument with "target_vault_name" argument linking...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_ami" "amzn2" { most_recent = true owners = ["amazon"] filter { name = "name" values = ["*ubun...
package cloud_desktop_high import rego.v1 default valid := false instance_valid(instance, ami) if { instance.expressions.instance_type ami.address in instance.expressions.ami.references } backup_plan_valid(backup_plan, backup_vault) if { # advanced_backup_setting some backup_setting ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_backup_plan.cloud_desktop_backup","mode":"managed","type":"aws_backup_plan","name":"cloud_desktop_backup","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"advanced_backup...
214
I need a way to deliver our site’s video content so that people around the world can load it quickly, while keeping things simple and making sure it only works in the countries we support.
The infrastructure for a video streaming site that distribute video content globally. Accomplish this using AWS CloudFront and AWS S3.
have one "aws_cloudfront_origin_access_control" resource with "name" argument with "origin_access_control_origin_type" with value "s3" with "signing_behavior" argument with "signing_protocol" argument have one "aws_cloudfront_distribution" resource with "origin.domain_name" argument linking to "aws_s3...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "video_content" { bucket_prefix = "video-content-" } resource "aws_s3_object" "put_website" { bu...
package netflix_high import rego.v1 bucket_valid(bucket) := true distribution_valid(distribution, bucket) if { some origin in distribution.expressions.origin bucket.address in origin.domain_name.references some cache_behavior in distribution.expressions.default_cache_behavior {method...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_cloudfront_distribution.my_distribution","mode":"managed","type":"aws_cloudfront_distribution","name":"my_distribution","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"a...
215
I need our video site to load quickly for people in many different regions, with the files stored in one place but delivered in a way that keeps things fast and reliable even during heavy traffic.
The infrastructure for a video streaming site that distribute video content globally. Accomplish this using the following services: AWS CloudFront, AWS Route53, and AWS S3. Add any necessary services.
have one "aws_cloudfront_origin_access_control" resource with "name" argument with "origin_access_control_origin_type" with value "s3" with "signing_behavior" argument with "signing_protocol" argument have one "aws_cloudfront_distribution" resource with "origin.domain_name" argument linking to "aws_...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "video_content" { bucket_prefix = "video-content-" } resource "aws_s3_object" "put_website" { bu...
package netflix_middle import data.set import rego.v1 bucket_valid(bucket) := true access_control_valid(access_control) if { access_control.expressions.name access_control.expressions.origin_access_control_origin_type.constant_value == "s3" access_control.expressions.signing_behavior access_control.expressions.s...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_cloudfront_distribution.my_distribution","mode":"managed","type":"aws_cloudfront_distribution","name":"my_distribution","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"a...
216
I need our video site to load quickly for people in many different countries, with the files stored in one place but delivered efficiently and tied to our own domain, while keeping things reasonably secure.
The infrastructure for a video streaming site that distribute video content globally. Accomplish this using the following resources: AWS CloudFront origin access control, AWS CloudFront distribution, AWS Route53 zone, and AWS S3 record. Add any necessary resources.
have one "aws_cloudfront_origin_access_control" resource with "name" argument with "origin_access_control_origin_type" with value "s3" with "signing_behavior" argument with "signing_protocol" argument have one "aws_cloudfront_distribution" resource with "origin.domain_name" argument linking to "aws_...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "video_content" { bucket_prefix = "video-content-" } locals { s3_origin_id = "s3_video_content_o...
package netflix_middle import data.set import rego.v1 bucket_valid(bucket) := true access_control_valid(access_control) if { access_control.expressions.name access_control.expressions.origin_access_control_origin_type.constant_value == "s3" access_control.expressions.signing_behavior access_control.expressions.s...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_cloudfront_distribution.my_distribution","mode":"managed","type":"aws_cloudfront_distribution","name":"my_distribution","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"a...
217
I want to make some static site content load quickly for people in a few specific countries, serve it under our own domain, and still keep things simple and inexpensive to run.
An AWS CloudFront distribution resource with an origin argument block with the following arguments (1) a domain_name argument linking to the S3 backet domain name; (2) an origin_access_control_id argument linking to a CloudFront origin access control resource; (3) an origin_id argument with value "s3_video_content_orig...
have one "aws_cloudfront_origin_access_control" resource with "name" argument with "origin_access_control_origin_type" with value "s3" with "signing_behavior" argument with value "always" with "signing_protocol" argument with value "sigv4" have one "aws_cloudfront_distribution" resource with "origi...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_s3_bucket" "website_content" { bucket_prefix = "website-content-" } resource "aws_s3_object" "put_website" ...
package netflix import data.set import rego.v1 bucket_valid(bucket) := true access_control_valid(access_control) if { access_control.expressions.name access_control.expressions.origin_access_control_origin_type.constant_value == "s3" access_control.expressions.signing_behavior.constant_value == "always" access_c...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_cloudfront_distribution.my_distribution","mode":"managed","type":"aws_cloudfront_distribution","name":"my_distribution","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"a...
218
I need a small database set up for testing, and its access details should be created automatically so I don’t have to pick them myself.
Create aws RDS with randomly generated id and password
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username" with skip_final_snapshot = true with identifier generated from random_id with passw...
provider "aws" { region="us-east-1" } resource "random_id" "suffix" { byte_length = 4 } resource "random_password" "db" { length = 16 special = false } resource "aws_db_instance" "test" { identifier = "metricbeat-test-${random_id.suffix.hex}" allocated_storage = 20 // Gigabytes engine ...
package terraform.validation default is_configuration_valid = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required argum...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.test","mode":"managed","type":"aws_db_instance","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_version_upgr...
219
I need a small, low-cost database for testing purposes, and it should come with its own autogenerated credentials so I don’t have to manage them myself.
Create an 20GB mysql on aws with randomly generated id and password
Has one "aws_db_instance" resource with one "allocated_storage" = 20 with one "engine" = mysql with one "instance_class" and a valid instance class type with one "password" with one "username" with skip_final_snapshot = true with identifier generated from random_id with password generate...
provider "aws" { region="us-east-1" } resource "random_id" "suffix" { byte_length = 4 } resource "random_password" "db" { length = 16 special = false } resource "aws_db_instance" "test" { identifier = "metricbeat-test-${random_id.suffix.hex}" allocated_storage = 20 // Gigabytes engine ...
package terraform.validation default is_configuration_valid = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required argum...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.test","mode":"managed","type":"aws_db_instance","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_version_upgr...
220
I need a small test database that I can reach from anywhere, with its own login details generated automatically so I don’t have to manage them myself.
creates a MySQL database instance on AWS RDS with a public access security group and a randomly generated password.
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" = "mysql" with one "instance_class" and a valid instance class type with one "password" with one "username" has one security_group has one random_string
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_region" "current" {} resource "random_string" "db_password" { keepers = { region = data.aws_region.current.na...
package terraform.validation default is_valid_db_instance = false default is_valid_security_group = false default is_valid_random_string = false # Validate aws_db_instance resource is_valid_db_instance { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_db_instance" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.main","mode":"managed","type":"aws_db_instance","name":"main","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_version_upgr...
221
I need a small, budget-friendly place to run a test database that I can reach from anywhere, and it should generate its own access credentials so I don’t have to manage them manually.
creates a 200 GB 5.7 MySQL database instance on AWS RDS with db.t3.micro instance class, a public access security group and a randomly generated password.
Has one "aws_db_instance" resource with one "allocated_storage" = 200 with one "engine" = "mysql" with one "instance_class" = "db.t3.micro" with one "password" with one "username" with one "engine_version" = "5.7" has one security_group has one random_string
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_region" "current" {} resource "random_string" "db_password" { keepers = { region = data.aws_region.current.na...
package terraform.validation default is_valid_db_instance = false default is_valid_security_group = false default is_valid_random_string = false # Validate aws_db_instance resource is_valid_db_instance { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_db_instance" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.main","mode":"managed","type":"aws_db_instance","name":"main","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":200,"allow_major_version_upg...
222
I need a small test database that I can reach from anywhere, with its own login details generated for me, and it should keep a backup when I remove it.
creates a MySQL database instance on AWS RDS with a public access security group and a randomly generated password. It should not skip final snapshot
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" = "mysql" with one "instance_class" and a valid instance class type with one "password" with one "username" with one "skip_final_snapshot" = false has one security_group has one random_string
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_region" "current" {} resource "random_string" "db_password" { keepers = { region = data.aws_region.current.na...
package terraform.validation default is_valid_db_instance = false default is_valid_security_group = false default is_valid_random_string = false # Validate aws_db_instance resource is_valid_db_instance { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_db_instance" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.main","mode":"managed","type":"aws_db_instance","name":"main","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_version_upgr...
223
I need to enable some extra capabilities on our database so it can record important activity and keep the stored data protected, and I want this set up in a way that stays compatible with the specific database version we’re using.
defines an AWS RDS option group named "option-group-pike" with major engine version 11, and use the sqlserver-ee engine. It should have options for "SQLSERVER_AUDIT" and "TDE"
Has one "aws_db_option_group" resource with one "engine_name" = "sqlserver-ee" with one "major_engine_version" = "11.00" with options for "SQLSERVER_AUDIT" and "TDE"
provider "aws" { region = "us-east-1" } resource "aws_db_option_group" "example" { name = "option-group-pike" option_group_description = "Terraform Option Group" engine_name = "sqlserver-ee" major_engine_version = "11.00" option { db_security_group_memberships = [...
package rds_new default is_valid_db_option_group = false # Regex pattern to match "11", "11.0", "11.00", etc. pattern := `^11(\.0+)?$` # Validate aws_db_option_group resource is_valid_db_option_group { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_db_option_group"...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_option_group.example","mode":"managed","type":"aws_db_option_group","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"engine_name":"sqlserver-ee","majo...
224
I need to enable some extra data‑protection and oversight features on our database, including sending certain activity details to an external location, and I want to keep the setup flexible enough to plug into our existing access controls.
defines an AWS RDS option group named "option-group-pike" with major engine version 11, and use the sqlserver-ee engine. It should have options for "SQLSERVER_AUDIT" and "TDE". IAM_ROLE_ARN and S3_BUCKET_ARN are included under SQLSERVER_AUDIT option
Has one "aws_db_option_group" resource with one "engine_name" = "sqlserver-ee" with one "major_engine_version" = "11.00" with options for "SQLSERVER_AUDIT" and "TDE" with specific settings for "IAM_ROLE_ARN" and "S3_BUCKET_ARN"
provider "aws" { region = "us-east-1" } resource "aws_db_option_group" "example" { name = "option-group-pike" option_group_description = "Terraform Option Group" engine_name = "sqlserver-ee" major_engine_version = "11.00" option { db_security_group_memberships = [...
package terraform.validation default is_valid_db_option_group = false # Regex pattern to match "11", "11.0", "11.00", etc. pattern := `^11(\.0+)?$` # Validate aws_db_option_group resource is_valid_db_option_group { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_db_o...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_option_group.example","mode":"managed","type":"aws_db_option_group","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"engine_name":"sqlserver-ee","majo...
225
I just need a super simple, low‑cost cloud machine spun up so I can run a few lightweight tasks without dealing with a bunch of setup.
generate Basic Amazon Lightsail
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument
provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "custom" { name = "custom" availability_zone = "us-east-1a" blueprint_id = "amazon_linux_2" bundle_id = "nano_2_0" }
package terraform.validation default is_valid_lightsail_instance = false is_valid_lightsail_instance { resource := input.planned_values.root_module.resources[_] resource.type == "aws_lightsail_instance" has_required_arguments(resource.values) } has_required_arguments(values) { values.name values....
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.custom","mode":"managed","type":"aws_lightsail_instance","name":"custom","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availability_zone...
226
I just need a small, low‑cost server spun up with a simple startup script that installs a basic web page as soon as it comes online.
generate Basic Amazon Lightsail with user Data
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument with "user_data" argument
provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "custom" { name = "custom" availability_zone = "us-east-1b" blueprint_id = "amazon_linux_2" bundle_id = "nano_1_0" user_data = "sudo yum install -y httpd && sudo systemctl start httpd && sudo sys...
package aws_lightsail_instance # aws_lightsail_instance attributes - optional default lightsail_instance_valid := false lightsail_instance_valid { instance := input.configuration.root_module.resources[_] instance.type == "aws_lightsail_instance" expressions := instance.expressions # Validate the presence ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.custom","mode":"managed","type":"aws_lightsail_instance","name":"custom","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availability_zone...
227
I need a small always‑on server for a simple app, and it should regularly back itself up without me having to manage anything.
generate aws lightsail with auto snapshots enabled
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument with "add_on" argument with type = "AutoSnapshot" with snapshot_time argument with status = "Enabled"
provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "test" { name = "custom_instance" availability_zone = "us-east-1b" blueprint_id = "amazon_linux_2" bundle_id = "nano_2_0" add_on { type = "AutoSnapshot" snapshot_time = "06:00" statu...
package terraform.validation default is_valid_lightsail_instance = false is_valid_lightsail_instance { resource := input.planned_values.root_module.resources[_] resource.type == "aws_lightsail_instance" has_required_arguments(resource.values) has_valid_add_on(resource.values.add_on) } has_required_ar...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.test","mode":"managed","type":"aws_lightsail_instance","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[{"snapshot_time":"06:00"...
228
I need a small, low‑cost server spun up quickly so I can test some code on a clean, standard setup.
create AWS Lightsail with default blueprint
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument
provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "gitlab_test" { name = "custom_gitlab" availability_zone = "us-east-1b" blueprint_id = "amazon_linux_2" bundle_id = "nano_2_0" }
package terraform.validation default is_valid_lightsail_instance = false is_valid_lightsail_instance { resource := input.planned_values.root_module.resources[_] resource.type == "aws_lightsail_instance" has_required_arguments(resource.values) } has_required_arguments(values) { values.name values....
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.gitlab_test","mode":"managed","type":"aws_lightsail_instance","name":"gitlab_test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availab...
229
I want a small, low-cost setup that can run a basic website with minimal effort, and it should be easy to get started without managing a lot of underlying pieces.
create AWS Lightsail with WordPress blueprint
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" = wordpress with "bundle_id" argument
provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "custom" { name = "custom" availability_zone = "us-east-1b" blueprint_id = "wordpress" bundle_id = "nano_2_0" }
package terraform.validation default has_valid_lightsail_instance = false # Main rule to check for a valid aws_lightsail_instance has_valid_lightsail_instance { resource := input.planned_values.root_module.resources[_] resource.type == "aws_lightsail_instance" resource.values.name resource.values.avai...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.custom","mode":"managed","type":"aws_lightsail_instance","name":"custom","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availability_zone...
230
I need a small always-on machine for a side project, and it should keep the same public address even if it restarts.
create Amazon Lightsail with static ipv4 IP
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument has one "aws_lightsail_static_ip" resource with "name" argument has one "aws_lightsail_static_ip_attachment" resource with "static_ip_name" a...
provider "aws" { region = "us-east-1" } resource "aws_lightsail_static_ip_attachment" "test" { static_ip_name = aws_lightsail_static_ip.test.id instance_name = aws_lightsail_instance.test.id } resource "aws_lightsail_static_ip" "test" { name = "example" } resource "aws_lightsail_instance" "test" { nam...
package terraform.validation default has_required_resources = false # Rule for aws_lightsail_instance resource has_lightsail_instance { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_instance" resource.values.name resource.values.availability_zone ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.test","mode":"managed","type":"aws_lightsail_instance","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availability_zone":"u...
231
I need a small, easy-to-manage server for a simple app, and it should work smoothly whether people connect with older or newer network formats.
create Amazon Lightsail with dualstack IP
have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument with "ip_address_type" = "dualstack"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_instance" "custom" { name = "custom" availability_zone = "us-east-1b" blueprint_id = "wordpress" bundle_id ...
package terraform.validation default has_valid_lightsail_instance = false # Main rule to check for a valid aws_lightsail_instance with specific arguments has_valid_lightsail_instance { resource := input.planned_values.root_module.resources[_] resource.type == "aws_lightsail_instance" resource.values.name ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_instance.custom","mode":"managed","type":"aws_lightsail_instance","name":"custom","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"add_on":[],"availability_zone...
232
I need a small, easy-to-manage place to store relational data for an app, and it should be simple to run without much overhead.
create AWS Lightsail that creates a managed database
have one "aws_lightsail_database" resource with relational_database_name argument with master_database_name with master_password with master_username with blueprint_id with bundle_id
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_database" "test" { relational_database_name = "test" availability_zone = "us-east-1a" master_database_name = "testdatab...
package terraform.validation default has_valid_lightsail_database = false # Rule for aws_lightsail_database resource with specific arguments has_valid_lightsail_database { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_database" resource.values.relatio...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_database.test","mode":"managed","type":"aws_lightsail_database","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","...
233
I need a small, easy-to-manage place to run a basic data store for an app, something lightweight that won’t require much upkeep but is still secure and reliable.
create an AWS Lightsail instance that creates a mysql database
have one "aws_lightsail_database" resource with relational_database_name argument with master_database_name with master_password with master_username with blueprint_id = mysql_8_0 with bundle_id
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_database" "test" { relational_database_name = "test" availability_zone = "us-east-1a" master_database_name = "testdatab...
package terraform.validation default has_valid_lightsail_database = false # Rule for aws_lightsail_database resource with specific arguments has_valid_lightsail_database { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_database" resource.values.relatio...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_database.test","mode":"managed","type":"aws_lightsail_database","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","...
234
I need a small managed data store for one of our apps, and it should handle its own backups and routine upkeep within specific time windows so it doesn’t disrupt users.
create AWS Lightsail that creates a mysql database. It should allow daily backups to take place between 16:00 and 16:30 each day and requires any maintenance tasks (anything that would cause an outage, including changing some attributes) to take place on Tuesdays between 17:00 and 17:30
have one "aws_lightsail_database" resource with relational_database_name argument with master_database_name with master_password with master_username with blueprint_id with bundle_id with preferred_backup_window = "16:00-16:30" preferred_maintenance_window = "Tue:17:00-Tue:17:30" ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_database" "test" { relational_database_name = "test" availability_zone = "us-east-1a" master_database_name ...
package terraform.validation default has_valid_lightsail_database = false # Rule for aws_lightsail_database resource with specific arguments has_valid_lightsail_database { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_database" resource.values.relatio...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_database.test","mode":"managed","type":"aws_lightsail_database","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","...
235
I need a small managed data store for an app prototype, and if we ever delete it I want to make sure we can keep a last backup without much hassle.
AWS Lightsail that creates a postgres database, which enables creating a final snapshot of your database on deletion
have one "aws_lightsail_database" resource with relational_database_name argument with master_database_name with master_password with master_username with blueprint_id = postgres_12 with bundle_id with final_snapshot_name
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_database" "test" { relational_database_name = "test" availability_zone = "us-east-1a" master_database_name ...
package terraform.validation default has_valid_lightsail_database = false # Rule for aws_lightsail_database resource with specific arguments has_valid_lightsail_database { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_database" resource.values.relatio...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_database.test","mode":"managed","type":"aws_lightsail_database","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","...
236
I need a small managed data store set up for a side project, and I want any updates to take effect right away instead of waiting for a scheduled rollout.
AWS Lightsail that creates a postgres database, which enables applying changes immediately instead of waiting for a maintenance window
have one "aws_lightsail_database" resource with relational_database_name argument with master_database_name with master_password with master_username with blueprint_id = postgres_12 with bundle_id apply_immediately = true
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_database" "test" { relational_database_name = "test" availability_zone = "us-east-1a" master_database_name = "testdatab...
package terraform.validation default has_valid_lightsail_database = false # Rule for aws_lightsail_database resource with specific arguments has_valid_lightsail_database { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_database" resource.values.relatio...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_database.test","mode":"managed","type":"aws_lightsail_database","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"apply_immediately":true,"availabi...
237
I need some extra storage that I can attach to a small, low‑maintenance server, and it should live close to where that server runs.
create a Lightsail Disk resource
have one aws_lightsail_disk resource with name with size_in_gb with availability_zone
provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" filter { name = "opt-in-status" values = ["opt-in-not-required"] } } resource "aws_lightsail_disk" "test" { name = "test" size_in_gb = 8 availability_zone = data.aws...
package terraform.validation default has_valid_lightsail_disk = false # Rule for aws_lightsail_disk resource with specific arguments has_valid_lightsail_disk { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_disk" resource.values.name resource.value...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_disk.test","mode":"managed","type":"aws_lightsail_disk","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","name":"t...
238
I need a small virtual machine with some extra storage added to it so I can keep a bit more data on hand without making things too complex.
Create a Lightsail Disk resource and attaches the Lightsail disk to a Lightsail Instance
have one aws_lightsail_disk resource with name with size_in_gb with availability_zone have one aws_lightsail_disk_attachment with disk_name with instance_name with disk_path have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blue...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" filter { name = "opt-in-status" values = ["opt-in-not-required"] } } resource ...
package terraform.validation default has_valid_lightsail_disk = false # Rule for aws_lightsail_disk resource with specific arguments has_valid_lightsail_disk { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_disk" resource.values.name resource.value...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_disk.test","mode":"managed","type":"aws_lightsail_disk","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","name":"t...
239
I need a small app server with some extra space for files, and the extra storage should be attached so the server can use it directly.
Set up a storage unit in Lightsail and link it with a Lightsail compute service.
have one aws_lightsail_disk resource with name with size_in_gb with availability_zone have one aws_lightsail_disk_attachment with disk_name with instance_name with disk_path have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blue...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" filter { name = "opt-in-status" values = ["opt-in-not-required"] } } resource ...
package terraform.validation default has_valid_lightsail_disk = false # Rule for aws_lightsail_disk resource with specific arguments has_valid_lightsail_disk { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_disk" resource.values.name resource.value...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_disk.test","mode":"managed","type":"aws_lightsail_disk","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","name":"t...
240
I need a small compute environment set up with some extra storage space added on, keeping everything simple and low‑cost.
Create a Lightsail instance with multiple attached disks
have multiple aws_lightsail_disk resource with name with size_in_gb with availability_zone have multiple aws_lightsail_disk_attachment with disk_name with instance_name with disk_path have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" filter { name = "opt-in-status" values = ["opt-...
package terraform.validation default has_valid_resources = false # Rule for multiple aws_lightsail_disk resources has_valid_lightsail_disks { count([disk | disk := input.planned_values.root_module.resources[_]; disk.type == "aws_lightsail_disk"; disk.values.name; disk.values.size_in_gb; disk.values.availability_z...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_disk.test1","mode":"managed","type":"aws_lightsail_disk","name":"test1","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"availability_zone":"us-east-1a","name":...
241
I need a simple way for one of my lightweight app environments to read and write files in a shared storage location without overcomplicating the setup.
Create a lightsail resource access to a bucket.
have one aws_lightsail_bucket with name with bundle_id have one aws_lightsail_bucket_resource_access with bucket_name with resource_name have one "aws_lightsail_instance" resource with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argum...
provider "aws" { region = "us-east-1" } resource "aws_lightsail_bucket" "test" { name = "mytestbucket" bundle_id = "small_1_0" } resource "aws_lightsail_instance" "test" { name = "mytestinstance" availability_zone = "us-east-1b" blueprint_id = "amazon_linux_2" bundle_id ...
package terraform.validation default has_valid_resources = false # Rule for aws_lightsail_bucket resource has_valid_lightsail_bucket { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_bucket" resource.values.name resource.values.bundle_id } # Rule f...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_bucket.test","mode":"managed","type":"aws_lightsail_bucket","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bundle_id":"small_1_0","force_delete"...
242
I need a way to verify ownership of my domain so I can serve it securely across a couple of related names.
Provides a lightsail certificate.
have one aws_lightsail_certificate with name with domain_name
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_certificate" "test" { name = "test" domain_name = "testdomain.com" subject_alternative_names = ...
package terraform.validation default has_valid_lightsail_certificate = false # Rule for aws_lightsail_certificate resource with specific arguments has_valid_lightsail_certificate { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_certificate" resource.va...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_certificate.test","mode":"managed","type":"aws_lightsail_certificate","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"domain_name":"testdomain.co...
243
I need a simple way to serve content quickly from a place where I store files, and it should handle a variety of request types without costing too much or getting overly complex.
managing an AWS Lightsail Distribution with bucket as the origin
have one aws_lightsail_distribution with name with bundle_id with default_cache_behavior with behavior with origin with name with region_name with cache_behavior_settings with forwarded_cookies with cookies_allow_list with forwarded_headers ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_lightsail_bucket" "test" { name = "test-bucket" bundle_id = "small_1_0" } resource "aws_lightsail_distribution" "test" { name = "t...
package terraform.validation default has_valid_resources = false # Rule for aws_lightsail_distribution resource has_valid_lightsail_distribution { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_distribution" resource.values.name resource.values.bun...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_bucket.test","mode":"managed","type":"aws_lightsail_bucket","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bundle_id":"small_1_0","force_delete"...
244
I want to make our small app load reliably and quickly from a single, steady address, and have a simple way to fan out traffic so users get decent performance without adding much complexity.
managing an AWS Lightsail Distribution with a instance as an origin
have one aws_lightsail_instance with "name" argument with "availability_zone" argument with "blueprint_id" argument with "bundle_id" argument have one aws_lightsail_distribution with name with bundle_id with default_cache_behavior with behavior with origin with name ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" filter { name = "opt-in-status" values = ["opt-in-not-required"] } } resource ...
package terraform.validation default has_valid_lightsail_resources = false # Rule for aws_lightsail_instance resource has_lightsail_instance { some i resource := input.planned_values.root_module.resources[i] resource.type == "aws_lightsail_instance" } # Rule for aws_lightsail_distribution resource has_li...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_lightsail_distribution.test","mode":"managed","type":"aws_lightsail_distribution","name":"test","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"bundle_id":"small_1_0","c...
245
I need a place to stash old data we almost never touch, and it should be really cheap to keep around for years.
an S3 Glacier vault for long-term data archiving
has one "aws_glacier_vault" resource with "name" attribute
provider "aws" { region = "us-east-1" # Replace with your desired AWS region } resource "aws_glacier_vault" "example" { name = "my-glacier-vault" }
package terraform.validation default is_valid_glacier_vault = false # Rule to validate if there is at least one aws_glacier_vault resource with a name attribute is_valid_glacier_vault { # Find a resource that is of type aws_glacier_vault resource := input.planned_values.root_module.resources[_] resource.t...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.example","mode":"managed","type":"aws_glacier_vault","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":null,"name":"my-glacie...
246
I need a place to keep data that we rarely touch but have to retain for a long time, and it should be as low‑cost as possible.
generate a aws storage for long term data and backup
has one "aws_glacier_vault" resource with "name" attribute
provider "aws" { region = "us-east-1" # Replace with your desired AWS region } resource "aws_glacier_vault" "example" { name = "my-glacier-vault" }
package terraform.validation default is_valid_glacier_vault = false # Rule to validate if there is at least one aws_glacier_vault resource with a name attribute is_valid_glacier_vault { # Find a resource that is of type aws_glacier_vault resource := input.planned_values.root_module.resources[_] resource.t...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.example","mode":"managed","type":"aws_glacier_vault","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":null,"name":"my-glacie...
247
I need a way to automatically move older data into a long‑term storage area so we can keep costs down while still keeping everything organized over time.
generate an S3 Glacier vault and use it with AWS Data Lifecycle Manager
has one "aws_glacier_vault" resource with "name" attribute has one "aws_dlm_lifecycle_policy" resource with "description" attribute with "execution_role_arn" with "policy_details"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_glacier_vault" "example" { name = "my-glacier-vault" } resource "aws_dlm_lifecycle_policy" "example" { description = "Automated archiving pol...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with a 'name' attribute is_valid_glacier_vault { resource := input.planned_values.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.values.nam...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_dlm_lifecycle_policy.example","mode":"managed","type":"aws_dlm_lifecycle_policy","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"default_policy":null,"d...
248
I need a place to keep long‑term data that rarely changes, and I’d like to get alerted whenever someone finishes pulling something out of it.
generate an S3 Glacier vault with notification service
has one "aws_glacier_vault" resource with "name" attribute with "notification" attribute with "event" attribute with "sns_topic" attribute with "aws_sns_topic" resource
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_sns_topic" "aws_sns_topic" { name = "glacier-sns-topic" } data "aws_iam_policy_document" "my_archive" { statement { sid = "add-read-on...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'notification' attributes is_valid_glacier_vault { resource := input.configuration.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.name) not is_...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.my_archive","mode":"managed","type":"aws_glacier_vault","name":"my_archive","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":"{\"Statement\":...
249
We need a place to keep infrequently used data for a long time, and I want to get alerted whenever someone finishes pulling something out of it.
generate an S3 Glacier vault with notification service integrated with aws sns
has one "aws_glacier_vault" resource with "name" attribute with "notification" attribute with "event" attribute with "sns_topic" attribute with "aws_sns_topic" resource
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_sns_topic" "aws_sns_topic" { name = "glacier-sns-topic" } data "aws_iam_policy_document" "my_archive" { statement { sid = "add-read-on...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'notification' attributes is_valid_glacier_vault { resource := input.configuration.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.name) not is_...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.my_archive","mode":"managed","type":"aws_glacier_vault","name":"my_archive","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":"{\"Statement\":...
250
We need a place to keep long‑term data that’s rarely touched, and it should have some basic rules about who can retrieve it so we stay compliant.
generage an S3 Glacier vault with a access policy
has one "aws_glacier_vault" resource with "name" attribute with "access_policy" attribute
provider "aws" { region = "us-east-1" } data "aws_iam_policy_document" "my_archive" { statement { sid = "add-read-only-perm" effect = "Allow" principals { type = "*" identifiers = ["*"] } actions = [ "glacier:InitiateJob", "glacier:GetJobOutput", ] ...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'access_policy' attributes is_valid_glacier_vault { resource := input.planned_values.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.values.name)...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.my_archive","mode":"managed","type":"aws_glacier_vault","name":"my_archive","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":"{\"Statement\":...
251
We need a super cheap place to keep long‑term records, but with guardrails so nobody can remove anything too soon.
generage an S3 Glacier vault with a Vault Lock
has one "aws_glacier_vault" resource with "name" attribute implicit: has one "aws_iam_policy_document" data has one "aws_glacier_vault_lock" resource
provider "aws" { region = "us-east-1" } resource "aws_glacier_vault" "example" { name = "example" } data "aws_iam_policy_document" "example" { statement { actions = ["glacier:DeleteArchive"] effect = "Deny" resources = [aws_glacier_vault.example.arn] condition { test = "Numer...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'access_policy' attributes is_valid_glacier_vault { resource := input.planned_values.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.values.name)...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.example","mode":"managed","type":"aws_glacier_vault","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":null,"name":"example",...
252
We need a long‑term storage area for rarely accessed data, and it should have strict, hard‑to‑change rules that prevent anything from being removed too soon.
generage an S3 Glacier vault with a access policy using "aws_iam_policy_document" data and lock the policy using "aws_glacier_vault_lock"
has one "aws_glacier_vault" resource with "name" attribute with "access_policy" attributr implicit: has one "aws_iam_policy_document" data has one "aws_glacier_vault_lock" resource
provider "aws" { region = "us-east-1" } resource "aws_glacier_vault" "example" { name = "example" } data "aws_iam_policy_document" "example" { statement { actions = ["glacier:DeleteArchive"] effect = "Deny" resources = [aws_glacier_vault.example.arn] condition { test = "Numer...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'access_policy' attributes is_valid_glacier_vault { resource := input.planned_values.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.values.name)...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.example","mode":"managed","type":"aws_glacier_vault","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":null,"name":"example",...
253
I need a long‑term storage area for rarely used data, and I want to get a heads‑up whenever something I’ve asked to restore is finally ready, so we don’t have to keep checking manually.
generage an S3 Glacier vault that pushes notification when archive retrieval completed
has one "aws_glacier_vault" resource with "name" attribute with notification attribute with sns_topic attribute with events attribute has one aws_sns_topic resources with name attribute
provider "aws" { region = "us-east-1" } # Create an SNS topic for notifications resource "aws_sns_topic" "glacier_notifications" { name = "glacier-archive-retrieval-notifications" } # Create an S3 Glacier Vault resource "aws_glacier_vault" "my_archive" { name = "MyArchive" notification { sns_topic = ...
package terraform.validation default is_valid_terraform_plan = false # Rule to check for an AWS Glacier Vault resource with 'name' and 'notification' attributes is_valid_glacier_vault { resource := input.planned_values.root_module.resources[_] resource.type == "aws_glacier_vault" not is_null(resource.valu...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_glacier_vault.my_archive","mode":"managed","type":"aws_glacier_vault","name":"my_archive","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"access_policy":null,"name":"MyA...
254
I want to put a single entry point in front of my app so incoming requests hit one place first and then get passed along to whatever is actually running the workload, and it should stay simple while still being able to handle public traffic reliably.
generate an Load Balancer that forward to a NLB
has one "aws_lb" resource with one of either "subnets" or "subnet_mapping" with load_balancer_type = "network" has at least two aws_subnet has one of cidr_block, ipv6_cidr_block argument has one aws_vpc has one of cider_block, ipv4_ipam_pool_id has one "aws_lb_listener" resource with one default...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } data "aws_availability_zones" "available" { state = "available" } module "vpc" { source = "terraform-aws-modules/vpc/aw...
package terraform.validation default is_valid_configuration = false # Validate at least one aws_instance with the required arguments is_valid_instance { count(valid_instances) > 0 } valid_instances[instance] { instance := input.configuration.root_module.resources[_] instance.type == "aws_inst...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_instance.my_instance","mode":"managed","type":"aws_instance","name":"my_instance","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"ami":"ami-0e57ee5ef084f761c","credit_sp...
255
I need a lightweight, reliable place for my app to store structured data, nothing fancy, just something easy to run and maintain.
create a basic AWS RDS instance
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username"
# Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_db_instance" "default" { allocated_storage = 10 db_name = "mydb" engine = "mysql" engine_version = "5.7" instance_class = "db.t3.micro" usernam...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":10,"allow_major_versio...
256
I need a simple place to store structured app data that can handle basic queries without much setup.
create a basic AWS SQL database
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 10 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all requ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":10,"allow_major_versio...
257
I need a place to store our app’s data that can handle moderate workloads without costing too much, and it should be easy to set up and manage.
Create a db.t4g.large AWS PostgresSQL
Has one "aws_db_instance" resource with one "allocated_storage" minimun value is 20 with one "engine" with value "postgres" with one "instance_class" with value "db.t4g.large" with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "postgres" instance_class = "db.t4g.large" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all requ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
258
I need a small database for an app we’re testing, but it should be tuned so it can handle heavier in‑memory workloads without slowing down.
create a memory optimized PostgresSQL
Has one "aws_db_instance" resource with one "allocated_storage" minimun value is 20 with one "engine" with value "postgres" with one "instance_class" with valid memory-optimized class with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "postgres" instance_class = "db.z1d.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all requ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
259
I need a small managed database for a project, and whenever I tweak its settings I want those updates to take effect right away.
Create an AWS mySQL instance. Any changes on it will be applied immediately
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" with value "mysql" with one "instance_class" with one "password" with one "username" with one "apply_immediately" set to true
# Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.z1d.micro" username = "foo" password = "foobarbaz" a...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
260
I need a small relational data store for an app I’m testing, and it’s fine if it doesn’t keep a backup when I tear it down.
Create an AWS mySQL instance that skips the final snapshot
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" with value "mysql" with one "instance_class" with one "password" with one "username" with one "skip_final_snapshot" set to true
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.z1d.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all requ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
261
I need a main data store for our app and a secondary one that can take over some of the read load, just to keep things responsive as we grow.
create an aws sql, and make a replica of it
Has two "aws_db_instance" resources resource 1: with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username" resource 2: with one "instance_class" with one "replicate_source_db" = "aws_db_instance.resource1.id...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.z1d.micro" username ...
package terraform.validation default is_valid_db_instance = false has_valid_replica { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" resource.expressions.instance_class resource.expressions.replicate_source_db } # Rule to check if a valid aws_db_instance exists ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
262
I need to spin up a new copy of an existing data system so we can reuse what's already there instead of rebuilding everything from scratch.
create a aws relational database from a snapshot
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "replica" { snapshot_identifier = "your identifier" instance_class = "db.z1d.micro" }
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.replica","mode":"managed","type":"aws_db_instance","name":"replica","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allow_major_version_upgrade":null,"apply_...
263
I need a small, low-cost place to store structured app data that’s fine living in our cloud environment and doesn’t need anything fancy, just something reliable enough for basic use.
create a basic AWS RDS instance, with gp3 storage type
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
264
I need a small, reliable place to run a standard relational datastore for my app, and it should use a storage setup that can handle steady performance even under load.
create a basic AWS RDS instance, with io1 storage type
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 100 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":100,"allow_major_versi...
265
I need to bring a data snapshot we’ve stored elsewhere back into a working database so the system can start from that state again.
create an aws database restored from s3
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username" with one s3_import with one bucket_name with one ingestion_role wit...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
266
I need a place to store application data that can grow on its own as usage increases, without me having to constantly resize anything.
create an AWS database that enables storage autoscaling
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username" with one max_allocated_storage
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
267
I need a simple managed data store set up so that its main login details are handled automatically and kept secure without me having to manage them manually.
create an aws database that Managed Master Passwords via Secrets Manager, default KMS Key
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "username" with one manage_master_user_password
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } # Helper rule to check if all required arguments are pr...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
268
I need a small, dependable data store set up, and I want its access details handled automatically in a secure way using our own encryption settings.
create an aws database that Managed Master Passwords via Secrets Manager, with specific KMS Key
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "username" with one manage_master_user_password with one "master_user_secret_kms_key_id" has one "aws_kms_key"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false default is_valid_kms_key = false # Rule to check if a valid aws_db_instance exists is_valid_db_instance { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_instance" has_required_db_arguments } is_valid_kms_key { re...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
269
I need a small, reliable place to store structured data for an app, and I also want a way to capture a full backup of it that I can restore later if something goes wrong.
Create an AWS mySQL instance and a snapshot of the instance
Has one "aws_db_instance" resource with one "allocated_storage" with one "engine" and a valid engine value with one "instance_class" and a valid instance class type with one "password" with one "username" Has one "aws_db_snapshot" instance with one "db_instance_identifier" with one "db_snapshot_identifier"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_db_instance" "default" { allocated_storage = 20 engine = "mysql" instance_class = "db.t3.micro" username ...
package terraform.validation default is_valid_db_instance = false default is_valid_db_snapshot = false is_valid_db_snapshot { resource := input.configuration.root_module.resources[_] resource.type == "aws_db_snapshot" resource.expressions.db_instance_identifier resource.expressions.db_snapshot_identifier }...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_instance.default","mode":"managed","type":"aws_db_instance","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":2,"values":{"allocated_storage":20,"allow_major_versio...
270
I need a shared place for multiple parts of my app to read and write files, and I want it labeled so it’s easy to keep track of.
create a AWS EFS File System with tags
Have one "aws_efs_file_system" resource with one tags
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_efs_file_system" "foo" { creation_token = "my-product" tags = { Name = "MyProduct" } }
package terraform.validation default is_valid_efs_file_system = false # Rule to check if a valid aws_efs_file_system exists is_valid_efs_file_system { resource := input.configuration.root_module.resources[_] resource.type == "aws_efs_file_system" resource.expressions.tags }
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_efs_file_system.foo","mode":"managed","type":"aws_efs_file_system","name":"foo","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"creation_token":"my-product","lifecycle_p...
271
I need a shared storage space that can automatically move older data to a cheaper tier so we don’t overspend.
create a AWS EFS Using lifecycle policy
Have one "aws_efs_file_system" resource
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_efs_file_system" "foo_with_lifecyle_policy" { creation_token = "my-product" lifecycle_policy { transition_to_ia = "AFTER_30_DAYS" } }
package terraform.validation default is_valid_efs_file_system = false # Rule to check if a valid aws_efs_file_system exists is_valid_efs_file_system { resource := input.configuration.root_module.resources[_] resource.type == "aws_efs_file_system" resource.expressions.lifecycle_policy }
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_efs_file_system.foo_with_lifecyle_policy","mode":"managed","type":"aws_efs_file_system","name":"foo_with_lifecyle_policy","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{...
272
I need a shared space for files that multiple parts of our system can work with, and it should only be usable in a secure way.
create a AWS EFS with a system policy
Have one "aws_efs_file_system" resource Have one "aws_efs_file_system_policy" resource with one "file_system_id" with one "policy"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_efs_file_system" "fs" { creation_token = "my-product" } data "aws_iam_policy_document" "policy" { statement { sid = "ExampleStatement0...
package terraform.validation default is_valid_efs_setup = false # Rule to check if a valid aws_efs_file_system and aws_efs_file_system_policy exists is_valid_efs_setup { has_valid_efs_file_system has_valid_efs_file_system_policy } # Helper rule to check if a valid aws_efs_file_system exists has_valid_efs_fil...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_efs_file_system.fs","mode":"managed","type":"aws_efs_file_system","name":"fs","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"creation_token":"my-product","lifecycle_pol...
273
I need a shared place for my app to store files, and it should automatically keep safe copies without me having to manage that myself.
create a AWS EFS with automatic backups enabled
Have one "aws_efs_file_system" resource Have one "aws_efs_backup_policy" resource with one "file_system_id" with one "backup_policy"
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_efs_file_system" "fs" { creation_token = "my-product" } resource "aws_efs_backup_policy" "policy" { file_system_id = aws_efs_file_system.fs.i...
package terraform.validation default is_valid_efs_setup = false # Rule to check if a valid aws_efs_file_system and aws_efs_file_system_policy exists is_valid_efs_setup { has_valid_efs_file_system has_valid_aws_efs_backup_policy } # Helper rule to check if a valid aws_efs_file_system exists has_valid_efs_file...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_efs_backup_policy.policy","mode":"managed","type":"aws_efs_backup_policy","name":"policy","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"backup_policy":[{"status":"ENAB...
274
I need a shared place for my apps to keep files so they can all read and write to the same spot, and it should be reachable from inside our private network.
create a AWS EFS with mount target
Have one "aws_efs_file_system" resource Have one "aws_efs_mount_target" resource with one "file_system_id" with one "subnet_id" Have one "aws_subnet" resource (FOR SUBNET_ID IN MOUNT_TARGET) Have one "aws_vpc" resource (FOR VPC_ID IN "AWS_SUBNET")
# Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_efs_file_system" "foo" { creation_token = "my-product" tags = { Name = "MyProduct" } } resource "aws_efs_mount_target" "alpha" { file_system_id = aws_efs_file_system.foo.id subnet_i...
package terraform.validation default is_valid_efs_setup = false # Rule to check if a valid aws_efs_file_system and aws_efs_file_system_policy exists is_valid_efs_setup { has_valid_efs_file_system has_valid_aws_efs_mount_target is_valid_vpc is_valid_subnet } # Helper rule to check if a valid aws_efs_f...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_efs_file_system.foo","mode":"managed","type":"aws_efs_file_system","name":"foo","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"creation_token":"my-product","lifecycle_p...
275
I need a private network for our systems that supports name lookups and can reach the outside world, and it should be isolated enough to meet stricter compliance needs.
sets up a dedicated-tenancy AWS VPC with DNS support and hostnames enabled, an internet gateway, and a route table for outbound internet access, all tagged with a variable name.
Has one "aws_vpc" resource with a specified "cidr_block" with "enable_dns_support" and "enable_dns_hostnames" set to true with "instance_tenancy" set to "dedicated" with a "tags" map including a "Name" key referencing a variable Has one "aws_internet_gateway" resource with "vpc_id" referencing the "aws_vpc" resource w...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "dgraph" { cidr_block = "10.0....
package terraform.validation default is_valid_vpc = false default is_valid_internet_gateway = false default is_valid_route_table = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expressions.cidr_block...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway.dgraph_gw","mode":"managed","type":"aws_internet_gateway","name":"dgraph_gw","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":...
276
I need a private network for our systems that lets them reach the wider internet when needed, keeps naming simple for anything we launch inside it, and can be labeled consistently for easier management.
sets up a dedicated-tenancy AWS VPC with DNS support and hostnames enabled, an internet gateway, and a route table for outbound internet access, all tagged with a variable name.
Has one "aws_vpc" resource with a specified "cidr_block" with "enable_dns_support" and "enable_dns_hostnames" set to true Has one "aws_internet_gateway" resource with "vpc_id" referencing the "aws_vpc" resource with a "tags" map including a "Name" key referencing a variable Has one "aws_route_table" resource with "vp...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "dgraph" { cidr_block = "10.0....
package terraform.validation default is_valid_vpc = false default is_valid_internet_gateway = false default is_valid_route_table = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expressions.cidr_block...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway.dgraph_gw","mode":"managed","type":"aws_internet_gateway","name":"dgraph_gw","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":...
277
I need a private network for our systems that still lets them reach the outside world when needed, stays isolated for security, and can be labeled consistently for easier management.
sets up a dedicated-tenancy AWS VPC with DNS support and hostnames enabled, an internet gateway, and a route table for outbound internet access, all tagged with a variable name.
Has one "aws_vpc" resource with a specified "cidr_block" with "enable_dns_support" and "enable_dns_hostnames" set to true with "instance_tenancy" set to "dedicated" with a "tags" map including a "Name" key referencing a variable Has one "aws_internet_gateway" resource with "vpc_id" referencing the "aws_vpc" resource w...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "dgraph" { cidr_block = "10.0....
package terraform.validation default is_valid_vpc = false default is_valid_internet_gateway = false default is_valid_route_table = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expressions.cidr_block...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway.dgraph_gw","mode":"managed","type":"aws_internet_gateway","name":"dgraph_gw","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":...
278
I need an isolated network space for our systems that still lets them reach the outside world, and it should be set up in the place where most of our other stuff already runs.
creates a VPC with a CIDR block of 10.0.0.0/16 and an internet gateway in the AWS us-east-2 region, both tagged with names "vpc" and "ig" respectively.
Has one "aws_vpc" resource: with a specified "cidr_block" of "10.0.0.0/16" with "enable_dns_hostnames" set to true with a "tags" map that is not null, including at least a "Name" key with a value Has one "aws_internet_gateway" resource: with "vpc_id" referencing the "aws_vpc" resource by its ID with a "tags" map that ...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "_" { cidr_block = "10.0.0.0/1...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_internet_gateway = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expression...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway._","mode":"managed","type":"aws_internet_gateway","name":"_","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":{"Name":"ig"},"t...
279
I need a private network for our systems that can also reach the public internet, and it should be set up in the general area where most of our users are located.
creates a VPC with a CIDR block, and an internet gateway in the AWS us-east-2 region, both tagged with names "vpc" and "ig" respectively.
Has one "aws_vpc" resource: with a specified "cidr_block" with "enable_dns_hostnames" set to true Has one "aws_internet_gateway" resource: with "vpc_id" referencing the "aws_vpc" resource by its ID with a "tags" map that is not null, including at least a "Name" key with a value
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "_" { cidr_block = "10.0.0.0/1...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_internet_gateway = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expression...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway._","mode":"managed","type":"aws_internet_gateway","name":"_","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":{"Name":"ig"},"t...
280
I need a private network for our systems where some parts can still reach the outside world, and it should be set up in the place where most of our users are.
creates a VPC with a CIDR block of 10.0.0.0/16 and an internet gateway in the AWS us-east-2 region
Has one "aws_vpc" resource: with a specified "cidr_block" of "10.0.0.0/16" with "enable_dns_hostnames" set to true Has one "aws_internet_gateway" resource: with "vpc_id" referencing the "aws_vpc" resource by its ID
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "_" { cidr_block = "10.0.0.0/1...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_internet_gateway = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expression...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway._","mode":"managed","type":"aws_internet_gateway","name":"_","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":{"Name":"ig"},"t...
281
I need a private network for our systems that can still reach the wider internet when needed, and it should be set up in the place where most of our users are located.
creates a VPC with a CIDR block and an internet gateway in the AWS us-east-2 region, both tagged with names "vpc" and "ig" respectively.
Has one "aws_vpc" resource: with a specified "cidr_block" with "enable_dns_hostnames" set to true with a "tags" map that is not null, including at least a "Name" key with a value Has one "aws_internet_gateway" resource: with "vpc_id" referencing the "aws_vpc" resource by its ID with a "tags" map that is not null, incl...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "_" { cidr_block = "10.0.0.0/1...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_internet_gateway = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_vpc" resource.expression...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_internet_gateway._","mode":"managed","type":"aws_internet_gateway","name":"_","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-east-2","tags":{"Name":"ig"},"t...
282
I need a private environment for our data systems that works across multiple fault zones, can reach the public internet when needed, and lets our database services accept connections openly while keeping the setup straightforward.
Set up a VPC in the AWS region with two subnets in different availability zones, an internet gateway, and a route table for internet access. It also defines a security group to allow access to MySQL and PostgreSQL databases on their default ports from any IP address and creates a database subnet group including both su...
Has one "aws_vpc" resource: Includes a "cidr_block" for network configuration Has two "aws_subnet" resources: Both subnets are part of the "aws_vpc" resource Each subnet is located in a different availability zone Has one "aws_internet_gateway" resource: Attached to the "aws_vpc" resource Has one "aws_route_table" r...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.75" } } required_version = ">= 1.9.0" } provider "aws" { region = "us-east-1" } resource "aws_vpc" "main" { cidr_block = "10.0.0.0/16" enable_dns_hostnames = true } data "aws_availability_zo...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_internet_gateway = false default is_valid_route_table = false default is_valid_subnets1 = false default is_valid_subnets2 = false default is_valid_security_group = false default is_valid_db_subnet_group = f...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_db_subnet_group.default","mode":"managed","type":"aws_db_subnet_group","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"description":"Managed by Terrafor...
283
I need a way for resources in one of our internal networks to reach the outside world using modern addressing, but without exposing them to unsolicited access, and it should be easy to keep track of.
creates an egress-only internet gateway named "pike" associated with a specified VPC, allowing IPv6-enabled instances to connect to the internet without allowing inbound internet traffic, and tags it with "permissions".
Has one "aws_egress_only_internet_gateway" resource: Associated with a specified "aws_vpc" resource Designed to allow IPv6-enabled instances within the VPC to connect to the internet while preventing inbound internet traffic Tagged with "permissions" to categorize or specify its role or access levels within the infrast...
# Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_egress_only_internet_gateway" "pike" { vpc_id = "vpc-0c33dc8cd64f408c4" tags = { pike = "permissions" } }
package terraform.validation # Set default validation state for aws_egress_only_internet_gateway default is_valid_egress_only_internet_gateway = false # Validate aws_egress_only_internet_gateway resource is_valid_egress_only_internet_gateway { some i resource := input.configuration.root_module.resources[i] resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_egress_only_internet_gateway.pike","mode":"managed","type":"aws_egress_only_internet_gateway","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-e...
284
I need a way for systems in our private network to reach outside services using modern addressing, but without opening them up to being contacted from the outside.
creates an egress-only internet gateway associated with a specified VPC, allowing IPv6-enabled instances to connect to the internet without allowing inbound internet traffic
Has one "aws_egress_only_internet_gateway" resource: Associated with a specified "aws_vpc" resource Designed to allow IPv6-enabled instances within the VPC to connect to the internet while preventing inbound internet traffic
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 5.0" } } } provider "aws" { region = "us-east-1" } resource "aws_egress_only_internet_gateway" "pike" { vpc_id = "vpc-0c33dc8cd64f408c4" tags = { pike = "permissions" } }
package terraform.validation # Set default validation state for aws_egress_only_internet_gateway default is_valid_egress_only_internet_gateway = false # Validate aws_egress_only_internet_gateway resource is_valid_egress_only_internet_gateway { some i resource := input.configuration.root_module.resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_egress_only_internet_gateway.pike","mode":"managed","type":"aws_egress_only_internet_gateway","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"region":"us-e...
285
I need our internal network to automatically point new machines to the name resolvers we prefer, and keep things organized with a simple tag for permissions.
creates DHCP options named "pike" for an AWS VPC, specifying Google's DNS servers (8.8.8.8 and 8.8.4.4), and then associates these DHCP options with a specified VPC, tagging both the DHCP options and their association with "permissions".
Has one "aws_vpc_dhcp_options" resource: Specifies Google's DNS servers (8.8.8.8 and 8.8.4.4) as the DNS servers for the DHCP options Tagged with "permissions" to categorize or specify its role or access levels within the infrastructure Has one "aws_vpc_dhcp_options_association" resource: Associates the "aws_vpc_dhcp...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_dhcp_options_association" "pike" { dhcp_...
package terraform.validation # Set default validation states default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc_dhcp_options resource is_valid_dhcp_options { some i resource := input.configuration.root_module.resources[i] resource.type =...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc_dhcp_options.pike","mode":"managed","type":"aws_vpc_dhcp_options","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"domain_name":null,"domain_name_server...
286
I need to make sure the systems in our private network automatically use the right naming service whenever they come online, and I’d like everything involved to be labeled clearly for access control.
creates DHCP options named "pike" for an AWS VPC, specifying Google's DNS servers, and then associates these DHCP options with a specified VPC, tagging both the DHCP options and their association with "permissions".
Has one "aws_vpc_dhcp_options" resource: Specifies Google's DNS servers Tagged with "permissions" to categorize or specify its role or access levels within the infrastructure Has one "aws_vpc_dhcp_options_association" resource: Associates the "aws_vpc_dhcp_options" with a specified "aws_vpc" resource Tagged with "per...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_dhcp_options_association" "pike" { dhcp_...
package terraform.validation # Set default validation states default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc_dhcp_options resource is_valid_dhcp_options { some i resource := input.configuration.root_module.resources[i] resource.type =...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc_dhcp_options.pike","mode":"managed","type":"aws_vpc_dhcp_options","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"domain_name":null,"domain_name_server...
287
I want all the systems inside our private network to automatically use a specific set of name lookups, so we don’t have to configure that manually on each machine.
creates DHCP options named "pike" for an AWS VPC, specifying Google's DNS servers (8.8.8.8 and 8.8.4.4), and then associates these DHCP options with a specified VPC
Has one "aws_vpc_dhcp_options" resource: Specifies Google's DNS servers (8.8.8.8 and 8.8.4.4) as the DNS servers for the DHCP options Tagged with "permissions" to categorize or specify its role or access levels within the infrastructure Has one "aws_vpc_dhcp_options_association" resource: Associates the "aws_vpc_dhcp...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_dhcp_options_association" "pike" { dhcp_...
package terraform.validation # Set default validation states default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc_dhcp_options resource is_valid_dhcp_options { some i resource := input.configuration.root_module.resources[i] resource.type =...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc_dhcp_options.pike","mode":"managed","type":"aws_vpc_dhcp_options","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"domain_name":null,"domain_name_server...
288
I need a private network for our systems, and I want all the machines in it to automatically pick up some custom naming and lookup settings so everything can find each other reliably.
sets up a basic AWS network infrastructure consisting of a Virtual Private Cloud (VPC) with a custom DHCP options set. The VPC is configured with a CIDR block of 192.168.0.0/16. The DHCP options include a domain name (windomain.local), a combination of custom and Google's DNS servers (192.168.56.102 and 8.8.8.8), and a...
Has one "aws_vpc" resource: Configured with a "cidr_block" of "192.168.0.0/16" Includes associated custom DHCP options Has one "aws_vpc_dhcp_options" resource: Specifies a "domain_name" of "windomain.local" Includes DNS servers with a combination of a custom DNS server ("192.168.56.102") and Google's DNS server ("8.8....
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "default" { cidr_block = "192.168.0.0/16...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.default","mode":"managed","type":"aws_vpc","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"192....
289
I need a private network for our systems where new machines automatically get the right basic settings for things like naming and where to look for shared resources, and it should all work smoothly without a lot of manual setup.
sets up a basic AWS network infrastructure consisting of a Virtual Private Cloud (VPC) with a custom DHCP options set. The VPC is configured with a CIDR block The DHCP options include a domain name (windomain.local), a combination of custom and Google's DNS servers (192.168.56.102 and 8.8.8.8), and a NetBIOS name serve...
Has one "aws_vpc" resource: Configured with a "cidr_block" Includes associated custom DHCP options Has one "aws_vpc_dhcp_options" resource: Specifies a "domain_name" of "windomain.local" Includes DNS servers with a combination of a custom DNS server ("192.168.56.102") and Google's DNS server ("8.8.8.8") Configures a "...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "default" { cidr_block = "192.168.0.0/16...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.default","mode":"managed","type":"aws_vpc","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"192....
290
I need a private network for our systems where connected machines automatically pick up some custom naming and lookup settings, so everything works smoothly without manual setup.
sets up a basic AWS network infrastructure consisting of a Virtual Private Cloud (VPC) with a custom DHCP options set. The VPC is configured with a CIDR block of 192.168.0.0/16. The DHCP options include a domain name (windomain.local), a combination of custom and Google's DNS servers, and a NetBIOS name server (192.168...
Has one "aws_vpc" resource: Configured with a "cidr_block" of "192.168.0.0/16" Includes associated custom DHCP options Has one "aws_vpc_dhcp_options" resource: Specifies a "domain_name" of "windomain.local" Includes DNS servers with a combination of a custom DNS server and Google's DNS server Configures a "NetBIOS" na...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "default" { cidr_block = "192.168.0.0/16...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.default","mode":"managed","type":"aws_vpc","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"192....
291
I need a private network for our systems, and I want all the machines in it to automatically pick up some custom naming and lookup settings so they can find each other easily and work with our existing environment.
sets up a basic AWS network infrastructure consisting of a Virtual Private Cloud (VPC) with a custom DHCP options set. The VPC is configured with a CIDR block of 192.168.0.0/16. The DHCP options include a domain name (windomain.local), a combination of custom and Google's DNS servers (192.168.56.102 and 8.8.8.8), and a...
Has one "aws_vpc" resource: Configured with a "cidr_block" of "192.168.0.0/16" Includes associated custom DHCP options Has one "aws_vpc_dhcp_options" resource: Specifies a "domain_name" of "windomain.local" Includes DNS servers with a combination of a custom DNS server ("192.168.56.102") and Google's DNS server ("8.8....
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "default" { cidr_block = "192.168.0.0/16...
package terraform.validation # Set default validation states default is_valid_vpc = false default is_valid_dhcp_options = false default is_valid_dhcp_options_association = false # Validate aws_vpc resource is_valid_vpc { some i resource := input.configuration.root_module.resources[i] resourc...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.default","mode":"managed","type":"aws_vpc","name":"default","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"192....
292
I need a way for systems in a restricted part of our network to reach the outside world reliably, and it should be clear from its metadata what team is responsible for it.
creates a NAT Gateway associated with a specified subnet and Elastic IP allocation ID. The NAT Gateway is configured for public connectivity and tagged with a key-value pair (pike = "permissions"), indicating its purpose or ownership.
Has one "aws_nat_gateway" resource: Associated with a specified "aws_subnet" resource for hosting the NAT Gateway. Utilizes an "Elastic IP allocation ID" to provide the NAT Gateway with a public IP address. Configured for public connectivity, allowing resources within the private subnet to access the internet securely....
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_nat_gateway" "pike" { subnet_id = "su...
package terraform.validation # Set default validation state default is_valid_nat_gateway = false # Validate aws_nat_gateway resource is_valid_nat_gateway { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_nat_gateway" # Ensure it is associated with...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_nat_gateway.pike","mode":"managed","type":"aws_nat_gateway","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"allocation_id":"eipalloc-0047fa56c40637c3b","av...
293
I need a way for systems in a private part of our network to reach the outside world reliably without exposing themselves directly.
creates a NAT Gateway associated with a specified subnet and Elastic IP allocation ID. The NAT Gateway is configured for public connectivity.
Has one "aws_nat_gateway" resource: Associated with a specified "aws_subnet" resource for hosting the NAT Gateway. Utilizes an "Elastic IP allocation ID" to provide the NAT Gateway with a public IP address. Configured for public connectivity, allowing resources within the private subnet to access the internet securely.
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_nat_gateway" "pike" { subnet_id = "su...
package terraform.validation # Set default validation state default is_valid_nat_gateway = false # Validate aws_nat_gateway resource is_valid_nat_gateway { some i resource := input.configuration.root_module.resources[i] resource.type == "aws_nat_gateway" # Ensure it is associated with...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_nat_gateway.pike","mode":"managed","type":"aws_nat_gateway","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"allocation_id":"eipalloc-0047fa56c40637c3b","av...
294
I need two separate private environments to talk to each other directly so that apps in each one can share data smoothly, and it should stay simple while keeping things organized for whoever manages it.
establishes a VPC peering connection between two AWS Virtual Private Clouds (VPCs). The first VPC, named "peer," is configured with a CIDR block of 10.0.0.0/24, and the second VPC, named "base," has a CIDR block of 10.1.0.0/24. The peering connection, named "pike," connects these two VPCs, allowing them to communicate ...
Has two "aws_vpc" resources: The first VPC is named "peer" and is configured with a "cidr_block" of "10.0.0.0/24". The second VPC is named "base" and is configured with a "cidr_block" of "10.1.0.0/24". Has one "aws_vpc_peering_connection" resource: Named "pike" to connect the two specified VPCs ("peer" and "base"). Co...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_peering_connection" "pike" { peer_vpc_id ...
package terraform.validation # Set default validation states default is_valid_vpcs = false default is_valid_vpc_peering_connection = false # Validate aws_vpc resources is_valid_vpcs { # Validate the first VPC named "peer" peer_vpc := input.configuration.root_module.resources[_] peer_vpc.type == "aws_vpc" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.base","mode":"managed","type":"aws_vpc","name":"base","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"10.1.0.0/2...
295
I need two separate private networks to be able to talk to each other directly so systems in each one can work together without going over the public internet.
establishes a VPC peering connection between two AWS Virtual Private Clouds (VPCs). The first VPC, named "peer," is configured with a CIDR block of 10.0.0.0/24, and the second VPC, named "base," has a CIDR block of 10.1.0.0/24. The peering connection, named "pike," connects these two VPCs, allowing them to communicate ...
Has two "aws_vpc" resources: The first VPC is named "peer" and is configured with a "cidr_block" of "10.0.0.0/24". The second VPC is named "base" and is configured with a "cidr_block" of "10.1.0.0/24". Has one "aws_vpc_peering_connection" resource: Named "pike" to connect the two specified VPCs ("peer" and "base"). Co...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_peering_connection" "pike" { peer_vpc_id ...
package terraform.validation # Set default validation states default is_valid_vpcs = false default is_valid_vpc_peering_connection = false # Validate aws_vpc resources is_valid_vpcs { # Validate the first VPC named "peer" peer_vpc := input.configuration.root_module.resources[_] peer_vpc.type == "aws_vpc" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.base","mode":"managed","type":"aws_vpc","name":"base","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"10.1.0.0/2...
296
I need two separate private networks to be able to talk to each other directly so systems on both sides can work together smoothly, and it should be kept simple and secure.
establishes a VPC peering connection between two AWS Virtual Private Clouds (VPCs). The first VPC name is "peer," and the second VPC name is "base". The peering connection, named "pike," connects these two VPCs, allowing them to communicate with each other as if they were in the same network, and is tagged with pike = ...
Has two "aws_vpc" resources: The first VPC is named "peer" The second VPC is named "base" Has one "aws_vpc_peering_connection" resource: Named "pike" to connect the two specified VPCs ("peer" and "base"). Configured to allow the VPCs to communicate with each other as if they were in the same network. Tagged with "pike...
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc_peering_connection" "pike" { peer_vpc_id ...
package terraform.validation # Set default validation states default is_valid_vpcs = false default is_valid_vpc_peering_connection = false # Validate aws_vpc resources is_valid_vpcs { # Validate the first VPC named "peer" peer_vpc := input.configuration.root_module.resources[_] peer_vpc.type == "aws_vpc" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_vpc.base","mode":"managed","type":"aws_vpc","name":"base","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":1,"values":{"assign_generated_ipv6_cidr_block":null,"cidr_block":"10.1.0.0/2...
297
I need everything in this environment to communicate freely with anything else for now, just to keep things simple while we’re getting started.
defines a default network ACL with unrestricted ingress and egress rules allowing all traffic from and to any source.
Has one "aws_default_network_acl" resource: with unrestricted ingress and egress rules allowing all traffic
provider "aws" { region = "us-west-1" } resource "aws_default_network_acl" "pike" { default_network_acl_id = "acl-01c8f6820c190c9dd" ingress { protocol = -1 rule_no = 100 action = "allow" cidr_block = "0.0.0.0/0" from_port = 0 to_port = 0 } egress { protocol = -1 ...
package terraform.validation # Set default validation states default is_valid_network_acl = false # Validate aws_default_network_acl resources is_valid_network_acl { some i network_acl := input.configuration.root_module.resources[i] network_acl.type == "aws_default_network_acl" ...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_default_network_acl.pike","mode":"managed","type":"aws_default_network_acl","name":"pike","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"default_network_acl_id":"acl-01...
298
I need a private area for our systems to run in, and I want to tightly control which kinds of connections can go in and out for security.
configures AWS as the cloud provider. It defines an AWS VPC with a specific CIDR block of 10.0.0.0/16 and a network ACL linked to this VPC that allows specific TCP traffic for both ingress (10.3.0.0/18 on port 80) and egress (10.3.0.0/18 on port 443) rules.
Has one "aws_vpc" resource: with a specified "cidr_block" of "10.0.0.0/16" Has one "aws_network_acl" resource: linked to the "aws_vpc" resource with ingress rules allowing TCP traffic from "10.3.0.0/18" on port 80 with egress rules allowing TCP traffic to "10.3.0.0/18" on port 443
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "example" { cidr_block = "10.0.0.0/16" }...
package terraform.validation # Set default validation states default is_valid_network_acl = false # Validate aws_network_acl resources is_valid_network_acl { some i network_acl := input.configuration.root_module.resources[i] network_acl.type == "aws_network_acl" network_acl.name == "ex...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_network_acl.example","mode":"managed","type":"aws_network_acl","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"egress":[{"action":"allow","cidr_block":"...
299
I need a private area in the cloud for our systems to run in, and I want to make sure only certain kinds of traffic are allowed in and out for security.
configures AWS as the cloud provider. It defines an AWS VPC with a specific CIDR block of 10.0.0.0/16 and a network ACL linked to this VPC that allows specific TCP traffic for both ingress and egress rules.
Has one "aws_vpc" resource: with a specified "cidr_block" of "10.0.0.0/16" Has one "aws_network_acl" resource: linked to the "aws_vpc" resource with ingress rules allowing TCP traffic with egress rules allowing TCP traffic
terraform { required_providers { aws = { source = "hashicorp/aws" version = ">= 4.16" } } required_version = ">= 1.2.0" } # Define the provider block for AWS provider "aws" { region = "us-east-2" # Set your desired AWS region } resource "aws_vpc" "example" { cidr_block = "10.0.0.0/16" }...
package terraform.validation # Set default validation states default is_valid_network_acl = false # Validate aws_network_acl resources is_valid_network_acl { some i network_acl := input.configuration.root_module.resources[i] network_acl.type == "aws_network_acl" network_acl.name == "ex...
{"format_version":"1.2","terraform_version":"1.14.3","planned_values":{"root_module":{"resources":[{"address":"aws_network_acl.example","mode":"managed","type":"aws_network_acl","name":"example","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"egress":[{"action":"allow","cidr_block":"...