repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/tests/test_stack.py | modules/analysis/rosbag-image-pipeline-sfn/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Set the CDK account/region env vars and reset the cached `stack` import.

    Function-scoped so every test re-imports `stack` with fresh module state.
    """
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"

    # Unload the app import so that subsequent tests don't reuse
    # module-level state cached by a previous test's import.
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_stack(stack_defaults):
    """Synthesize TemplateStack with dummy parameters and assert the expected
    resource counts in the generated CloudFormation template."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"

    rosbag_stack = stack.TemplateStack(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        hash="foobar",
        vpc_id="vpc-id",
        source_bucket_name="raw-bucket",
        target_bucket_name="intermediate-bucket",
        logs_bucket_name="logs-bucket",
        artifacts_bucket_name="artifacts-bucket",
        # JSON-encoded list, matching how SeedFarmer passes subnet ids.
        private_subnet_ids='["foo","bar"]',
        emr_job_exec_role_arn="dummy-emr-role",
        emr_app_id="012345678",
        detection_ddb_name="scene-metadata-table",
        on_demand_job_queue_arn="arn:aws:batch:us-west-2:123456789101:job-queue/addf-example",
        fargate_job_queue_arn="arn:aws:batch:us-west-2:123456789101:job-queue/addf-example",
        parquet_batch_job_def_arn="dummyarn",
        png_batch_job_def_arn="dummyarn",
        object_detection_image_uri="dummyuri",
        object_detection_role_arn="dummyrole",
        object_detection_job_concurrency=10,
        object_detection_instance_type="ml.2xlarge",
        lane_detection_image_uri="dummyuri",
        lane_detection_role_arn="dummyrole",
        lane_detection_job_concurrency=10,
        lane_detection_instance_type="ml.2xlarge",
        file_suffix=".bag",
        desired_encoding="bgr8",
        yolo_model="yolov5s",
        stack_description="Testing",
        image_topics='["foo", "bar"]',
        sensor_topics='["foo", "bar"]',
        env=cdk.Environment(
            account=(os.environ["CDK_DEFAULT_ACCOUNT"]),
            region=(os.environ["CDK_DEFAULT_REGION"]),
        ),
    )

    template = Template.from_stack(rosbag_stack)
    # Expected resource footprint of the synthesized stack.
    template.resource_count_is("AWS::DynamoDB::Table", 1)
    template.resource_count_is("AWS::IAM::Role", 3)
    template.resource_count_is("AWS::StepFunctions::StateMachine", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/tests/__init__.py | modules/analysis/rosbag-image-pipeline-sfn/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/lambda/create-batch-of-drives/src/lambda_function.py | modules/analysis/rosbag-image-pipeline-sfn/lambda/create-batch-of-drives/src/lambda_function.py | import logging
import os
import typing
import boto3
from boto3.dynamodb.conditions import Key
if typing.TYPE_CHECKING:
from mypy_boto3_dynamodb.service_resource import DynamoDBServiceResource
from mypy_boto3_s3.client import S3Client
# Root logger at DEBUG; Lambda re-uses these module-level objects across
# warm invocations, so clients/resources are created once per container.
logger = logging.getLogger()
logger.setLevel("DEBUG")
dynamodb_resource = boto3.resource("dynamodb")
s3_client = boto3.client("s3")

# DYNAMODB_TABLE: name of the batch/drive tracking table.
DYNAMODB_TABLE = os.environ["DYNAMODB_TABLE"]
# FILE_SUFFIX: extension used to select drive files, e.g. ".bag".
FILE_SUFFIX = os.environ["FILE_SUFFIX"]
def add_drives_to_batch(
    # Annotation corrected: this is the boto3 DynamoDB Table resource passed
    # on to batch_write_files_to_dynamo (was mis-annotated as str).
    table: typing.Any,
    batch_id: str,
    drives_to_process: typing.Dict[str, dict],
    file_suffix: str,
    s3_client: "S3Client",
) -> int:
    """
    Lists files with file_suffix for each prefix in drives_to_process and adds each file to DynamoDB for tracking

    @param table: dynamo tracking table (boto3 Table resource)
    @param batch_id: state machine execution id
    @param drives_to_process: {
        "drive1": {"bucket": "addf-example-dev-raw-bucket-xyz", "prefix": "rosbag-scene-detection/drive1/"},
        "drive2": {"bucket": "addf-example-dev-raw-bucket-xyz", "prefix": "rosbag-scene-detection/drive2/"},
    }
    @param file_suffix: ".bag"
    @param s3_client: type boto3.client('s3')
    @return: total number of files added to the tracking table
    """
    drives_and_files = {}
    files_in_batch = 0
    for drive_id, s3_path in drives_to_process.items():
        # List the matching S3 keys for this drive's prefix.
        files = get_drive_files(
            src_bucket=s3_path["bucket"],
            src_prefix=s3_path["prefix"],
            file_suffix=file_suffix,
            s3_client=s3_client,
        )
        drives_and_files[drive_id] = {"files": files, "bucket": s3_path["bucket"]}
        files_in_batch += len(files)

    logger.info(f"files_in_batch {files_in_batch}")
    # Persist one tracking item per file under pk=batch_id.
    batch_write_files_to_dynamo(table, drives_and_files, batch_id)
    return files_in_batch
def get_drive_files(src_bucket: str, src_prefix: str, file_suffix: str, s3_client: "S3Client") -> typing.List[str]:
    """List all keys directly under src_prefix that end with file_suffix.

    Paginates via ContinuationToken. Bug fix: the suffix filter is now applied
    to EVERY page — previously only the first page was filtered, so paginated
    listings silently included files with other extensions.

    @param src_bucket: S3 bucket name
    @param src_prefix: key prefix to list (non-recursive: Delimiter="/")
    @param file_suffix: required key suffix, e.g. ".bag"
    @param s3_client: boto3 S3 client
    @return: list of matching object keys
    """
    log = logging.getLogger()  # same root logger as the module-level `logger`
    MAX_KEYS = 1000
    log.info(src_bucket)
    log.info(src_prefix)

    files: typing.List[str] = []
    continuation: typing.Optional[str] = None
    while True:
        kwargs: typing.Dict[str, typing.Any] = {
            "Bucket": src_bucket,
            "Prefix": src_prefix,
            "MaxKeys": MAX_KEYS,
            "Delimiter": "/",
        }
        if continuation is not None:
            kwargs["ContinuationToken"] = continuation
        file_response = s3_client.list_objects_v2(**kwargs)
        log.info(file_response)
        # Apply the suffix filter on every page, not only the first.
        files += [x["Key"] for x in file_response.get("Contents", []) if x["Key"].endswith(file_suffix)]
        continuation = file_response.get("NextContinuationToken")
        if continuation is None:
            break

    log.info(files)
    return files
def batch_write_files_to_dynamo(
    # Annotation corrected: a boto3 DynamoDB *Table* resource (has
    # .batch_writer()), not the service resource.
    table: typing.Any,
    # Annotation corrected: values are {"files": [...], "bucket": "..."} dicts,
    # not plain lists of keys.
    drives_and_files: typing.Dict[str, typing.Dict[str, typing.Any]],
    batch_id: str,
) -> None:
    """Write one tracking item per file via the table's batch writer.

    Items are keyed pk=batch_id / sk=<running index across all drives>, so a
    Step Functions map state can address them by index.
    """
    log = logging.getLogger()  # same root logger as the module-level `logger`
    with table.batch_writer() as writer:
        idx = 0
        for drive_id, drive in drives_and_files.items():
            for file in drive["files"]:
                item = {
                    "drive_id": drive_id,
                    "file_id": file.split("/")[-1],
                    "s3_bucket": drive["bucket"],
                    "s3_key": file,
                    "pk": batch_id,
                    "sk": str(idx),
                }
                log.info(item)
                writer.put_item(Item=item)
                idx += 1
# Return annotation corrected: the handler returns a dict, not an int.
def lambda_handler(event: typing.Dict[str, typing.Any], context: typing.Any) -> typing.Dict[str, typing.Any]:
    """Create (or re-use) the batch of drive files for one execution.

    Expects event = {"DrivesToProcess": {...}, "ExecutionID": "..."} and
    returns {"BatchSize": <int>, "ExecutionID": <str>}.
    """
    drives_to_process = event["DrivesToProcess"]
    execution_id = event["ExecutionID"]

    table = dynamodb_resource.Table(DYNAMODB_TABLE)
    # Idempotency check: a COUNT query tells us whether a previous (retried)
    # invocation already populated this execution's batch.
    files_in_batch = table.query(
        KeyConditionExpression=Key("pk").eq(execution_id),
        Select="COUNT",
    )["Count"]

    if files_in_batch > 0:
        logger.info("Batch Id already exists in tracking table - using existing batch")
        return {"BatchSize": files_in_batch, "ExecutionID": execution_id}

    logger.info("New Batch Id - collecting unprocessed drives from S3 and adding to the batch")
    files_in_batch = add_drives_to_batch(
        table=table,
        drives_to_process=drives_to_process,
        batch_id=execution_id,
        file_suffix=FILE_SUFFIX,
        s3_client=s3_client,
    )

    # Hard cap on batch size; presumably tied to downstream map-state limits
    # — confirm against the state machine definition.
    if files_in_batch > 10_000:
        raise RuntimeError("Batch Size cannot exceed 10,000")

    return {"BatchSize": files_in_batch, "ExecutionID": execution_id}
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/emr-scripts/detect_scenes.py | modules/analysis/rosbag-image-pipeline-sfn/emr-scripts/detect_scenes.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import sys
from typing import Any, Dict, List
import boto3
import pyspark.sql.functions as func
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import (
aggregate,
col,
concat,
count,
first,
from_json,
lit,
split,
)
from pyspark.sql.types import (
ArrayType,
DoubleType,
IntegerType,
StringType,
StructField,
StructType,
)
# Spark schema for the object-detection CSV output (all_predictions.csv):
# one bounding box per row with confidence, class id/name and source image.
# "_c0" is presumably an unnamed index column written by the detection job —
# confirm against its output format.
obj_schema = StructType(
    [
        StructField("_c0", IntegerType(), True),
        StructField("xmin", DoubleType(), True),
        StructField("ymin", DoubleType(), True),
        StructField("xmax", DoubleType(), True),
        StructField("ymax", DoubleType(), True),
        StructField("confidence", DoubleType(), True),
        StructField("class", IntegerType(), True),
        StructField("name", StringType(), True),
        StructField("source_image", StringType(), True),
    ]
)
def parse_arguments(args):
    """Parse the EMR job's command-line flags and return the namespace.

    input-bucket and image-topics are optional; everything else is required.
    """
    parser = argparse.ArgumentParser()
    flag_spec = (
        ("--batch-metadata-table-name", True),
        ("--batch-id", True),
        ("--input-bucket", False),
        ("--output-bucket", True),
        ("--output-dynamo-table", True),
        ("--region", True),
        ("--image-topics", False),
    )
    for flag, is_required in flag_spec:
        parser.add_argument(flag, required=is_required)
    return parser.parse_args(args=args)
def form_object_in_lane_df(obj_df, lane_df):
    """Join object detections with lane detections, keeping objects over a lane.

    Joins on source_image, indexes each image's "lanes" pixel-row array at the
    object's ymax (bottom edge) row, and keeps rows whose summed pixel
    intensity at that row is non-zero. Also derives a DoubleType "Time" column
    from the underscore-separated image file name (parts 2 and 3 — presumably
    seconds and subseconds; confirm against the image naming convention).
    """
    obj_lane_df = obj_df.join(lane_df, on="source_image", how="inner")
    # lanes[int(ymax)]: the lane-mask pixel row at the object's bottom edge.
    obj_lane_df = (
        obj_lane_df.withColumn(
            "pixel_rows_at_bottom_corner",
            obj_lane_df["lanes"].getItem(obj_lane_df.ymax.cast(IntegerType())),
        )
        .drop("lanes")
        .drop("ymax")
    )
    # Sum intensities across that row; zero means no lane pixels under the object.
    obj_lane_df = obj_lane_df.withColumn(
        "sum_of_pixels_intensities_at_bottom_corner_rows",
        aggregate("pixel_rows_at_bottom_corner", lit(0), lambda acc, x: acc + x),
    ).drop("pixel_rows_at_bottom_corner")
    obj_lane_df = obj_lane_df.filter(obj_lane_df.sum_of_pixels_intensities_at_bottom_corner_rows != 0)

    # Time = "<part2>.<first two chars of part3>" of the split file name.
    obj_lane_df = obj_lane_df.withColumn("source_image_split", split("source_image", "_"))
    obj_lane_df = obj_lane_df.withColumn(
        "Time",
        concat(
            obj_lane_df["source_image_split"].getItem(2),
            lit("."),
            obj_lane_df["source_image_split"].getItem(3).substr(1, 2),
        ).cast(DoubleType()),
    )
    return obj_lane_df
def get_batch_file_metadata(table_name, batch_id, region):
    """Fetch every tracking item for batch_id from the DynamoDB table.

    Bug fixes:
      * pagination now calls list.extend() — the original called .update(),
        which raises AttributeError on a list as soon as a second page exists;
      * the follow-up query repeats KeyConditions — the original dropped them,
        which would have scanned unrelated keys on continuation pages.

    @param table_name: DynamoDB tracking table name
    @param batch_id: partition key value (state machine execution id)
    @param region: AWS region of the table
    @return: list of item dicts
    """
    dynamodb = boto3.resource("dynamodb", region_name=region)
    table = dynamodb.Table(table_name)
    key_conditions = {"pk": {"AttributeValueList": [batch_id], "ComparisonOperator": "EQ"}}
    response = table.query(KeyConditions=key_conditions)
    data = response["Items"]
    while "LastEvaluatedKey" in response:
        response = table.query(
            KeyConditions=key_conditions,
            ExclusiveStartKey=response["LastEvaluatedKey"],
        )
        data.extend(response["Items"])
    return data
def load_obj_detection(spark, batch_metadata, image_topics):
    """Read the per-drive object-detection CSVs into one DataFrame.

    Builds the list of all_predictions.csv paths — optionally restricted to
    resized-image directories matching one of image_topics — then loads them
    with obj_schema and replaces spaces in the class name with underscores.
    """
    csv_paths = []
    for record in batch_metadata:
        for image_dir in record["resized_image_dirs"]:
            full_path = f"s3://{record['raw_image_bucket']}/{image_dir}_post_obj_dets/all_predictions.csv"
            if image_topics and len(image_topics) > 0:
                # One append per matching topic (preserves the original
                # behavior, including duplicates when several topics match).
                for topic in image_topics:
                    if topic in image_dir:
                        csv_paths.append(full_path)
            else:
                csv_paths.append(full_path)

    # Class names like "traffic light" become "traffic_light" for column names.
    underscore_names = func.udf(lambda name: name.replace(" ", "_"), StringType())

    raw_df = spark.read.schema(obj_schema).option("header", True).csv(csv_paths)
    cleaned_df = raw_df.withColumn("name", underscore_names(raw_df.name))
    print("Number of rows in Object Detection dataframe")
    print(cleaned_df.count())
    return cleaned_df
def load_lane_detection(spark, batch_metadata):
    """Read the per-drive lane-detection CSVs into a single DataFrame.

    Each lanes.csv row is tagged with its source bag file's bucket, name and
    prefix, and the JSON-encoded "lanes" column is parsed into a 2-D int array.

    Bug fix: DataFrame.union returns a NEW DataFrame; the original discarded
    the result, so only the first file was ever loaded.

    NOTE(review): only the FIRST resized_image_dir of batch_metadata[0] is
    read, while every dir of the remaining items is read — asymmetry kept
    as-is; confirm intent.
    """
    first_item = batch_metadata[0]
    first_path_prefix = first_item["resized_image_dirs"][0]
    first_path = f"s3://{first_item['raw_image_bucket']}/{first_path_prefix}_post_lane_dets/lanes.csv"
    first_item_split = first_item["s3_key"].rpartition("/")
    bag_file_prefix = first_item_split[0]
    bag_file = first_item_split[2].split(".")[0]
    df = (
        spark.read.option("header", True)
        .csv(first_path)
        .withColumn("bag_file_bucket", lit(first_item["s3_bucket"]))
        .withColumn("bag_file", lit(bag_file))
        .withColumn("bag_file_prefix", lit(bag_file_prefix))
    )
    for item in batch_metadata[1:]:
        for resized_image_dir in item["resized_image_dirs"]:
            path = f"s3://{item['raw_image_bucket']}/{resized_image_dir}_post_lane_dets/lanes.csv"
            item_split = item["s3_key"].rpartition("/")
            bag_file_prefix = item_split[0]
            bag_file = item_split[2].split(".")[0]
            # union() is not in-place — reassign (the original dropped it).
            df = df.union(
                spark.read.option("header", True)
                .csv(path)
                .withColumn("bag_file_bucket", lit(item["s3_bucket"]))
                .withColumn("bag_file", lit(bag_file))
                .withColumn("bag_file_prefix", lit(bag_file_prefix))
            )

    lane_schema = ArrayType(ArrayType(IntegerType()), False)
    df = df.withColumn("lanes", from_json(col("lanes"), lane_schema))
    return df
def write_results_s3(dfs: Dict[str, Any], table_name: str, output_bucket: str, partition_cols=None):
    """Append each DataFrame in dfs to s3://output_bucket/table_name/<key> as parquet.

    @param dfs: mapping of key (e.g. object class) -> Spark DataFrame
    @param table_name: logical table name used as the S3 path segment
    @param output_bucket: destination bucket
    @param partition_cols: optional list of partition columns; default is none.
        Bug fix: the original used a mutable [] default, which is shared across
        calls and mutable by callers.
    """
    cols = partition_cols if partition_cols is not None else []
    for key, df in dfs.items():
        s3_path = f"s3://{output_bucket}/{table_name}/{key}"
        df.write.mode("append").partitionBy(*cols).parquet(s3_path)
def write_results_dynamo(dfs, output_dynamo_table, region):
    """Append every DataFrame in dfs to the given DynamoDB table using the
    spark "dynamodb" data-source format."""
    for key in dfs:
        print(f"Writing {key} to DDB")
        writer = dfs[key].write.mode("append")
        writer = writer.option("tableName", output_dynamo_table)
        writer = writer.option("region", region)
        writer.format("dynamodb").save()
def summarize_obj_in_lane_scenes(obj_lane_df, image_topics, obj_type):
    """Collapse per-image object-in-lane detections into scene intervals.

    For each bag file (time-ordered), finds frames where the count of
    `obj_type` in lane transitions from zero to non-zero (scene start) and
    back to zero (scene end), and emits one row per scene with start/end
    times, a scene_id, the scene length and the topics analyzed.
    """
    obj_lane_df = obj_lane_df.filter(obj_lane_df.name == obj_type)
    # Per-image count of this object class, carrying the bag-file identity.
    obj_lane_df = obj_lane_df.groupby("source_image").agg(
        count("name").alias(f"num_{obj_type}_in_lane"),
        first("Time").alias("Time"),
        first("bag_file_bucket").alias("bag_file_bucket"),
        first("bag_file").alias("bag_file"),
        first("bag_file_prefix").alias("bag_file_prefix"),
    )
    # Time-ordered window per bag file for lag/lead transition detection.
    win = Window.orderBy("Time").partitionBy("bag_file_bucket", "bag_file", "bag_file_prefix")
    # Keep only transition frames: the previous or next frame had zero objects.
    obj_lane_df = (
        obj_lane_df.withColumn(
            f"num_{obj_type}_in_lane_lag1",
            func.lag(func.col(f"num_{obj_type}_in_lane"), 1, 0).over(win),
        )
        .withColumn(
            f"num_{obj_type}_in_lane_lead1",
            func.lead(func.col(f"num_{obj_type}_in_lane"), 1, 0).over(win),
        )
        .filter(f"num_{obj_type}_in_lane_lag1 == 0 or num_{obj_type}_in_lane_lead1 ==0")
    )
    # "start" when objects appear after a gap, "end" when they disappear.
    scene_state_udf = func.udf(
        lambda num, lag: "start" if num > 0 and lag is None else ("end" if num == 0 and lag > 0 else None),
        StringType(),
    )
    # Keep start rows (lag == 0) and pair each with the next transition's
    # time as the scene end.
    obj_in_lane_scenes_df = (
        obj_lane_df.withColumn(
            "scene_state",
            scene_state_udf(
                obj_lane_df[f"num_{obj_type}_in_lane"],
                obj_lane_df[f"num_{obj_type}_in_lane_lag1"],
            ),
        )
        .withColumn("end_time", func.lead(func.col("Time"), 1).over(win))
        .filter(f"num_{obj_type}_in_lane_lag1 ==0")
        .withColumnRenamed("Time", "start_time")
        .withColumnRenamed(f"num_{obj_type}_in_lane", f"num_{obj_type}_in_lane_start")
        .select(
            "bag_file",
            "bag_file_bucket",
            "bag_file_prefix",
            "start_time",
            "end_time",
            f"num_{obj_type}_in_lane_start",
        )
        .withColumn(
            "scene_id",
            func.concat(
                func.col("bag_file"),
                func.lit(f"_{obj_type}InLane_"),
                func.col("start_time"),
            ),
        )
        .withColumn("scene_length", func.col("end_time") - func.col("start_time"))
        .withColumn("topics_analyzed", func.lit(",".join(image_topics)))
    )
    return obj_in_lane_scenes_df
def main(
    batch_metadata_table_name,
    batch_id,
    output_bucket,
    output_dynamo_table,
    spark,
    region,
    image_topics,
):
    """Run scene detection for one batch: load the object/lane detections,
    derive object-in-lane scenes per object class, and persist the results
    to S3 (parquet) and DynamoDB."""
    # Load files to process
    batch_metadata = get_batch_file_metadata(table_name=batch_metadata_table_name, batch_id=batch_id, region=region)

    if image_topics:
        image_topics = json.loads(image_topics)
        image_topics = [topic.replace("/", "_") for topic in image_topics if image_topics]

    # Load topic data from s3 and union
    obj_df = load_obj_detection(spark, batch_metadata=batch_metadata, image_topics=image_topics)
    lane_df = load_lane_detection(
        spark,
        batch_metadata=batch_metadata,
    )
    obj_lane_df = form_object_in_lane_df(obj_df, lane_df)

    # One scene summary per object class of interest.
    object_types = (
        "car",
        "truck",
        "traffic_light",
        "train",
        "bus",
        "motorcycle",
        "stop_sign",
        "fire_hydrant",
    )
    dfs = {obj_type: summarize_obj_in_lane_scenes(obj_lane_df, image_topics, obj_type) for obj_type in object_types}

    write_results_s3(
        dfs,
        table_name="scene_detections",
        output_bucket=output_bucket,
        partition_cols=["bag_file"],
    )
    write_results_dynamo(dfs, output_dynamo_table, region)
if __name__ == "__main__":
    # EMR entry point: build the Spark session, parse CLI args, run the job.
    spark = SparkSession.builder.appName("scene-detection").getOrCreate()
    sc = spark.sparkContext

    arguments = parse_arguments(sys.argv[1:])
    batch_metadata_table_name = arguments.batch_metadata_table_name
    batch_id = arguments.batch_id
    output_bucket = arguments.output_bucket
    output_dynamo_table = arguments.output_dynamo_table
    region = arguments.region
    # Optional JSON list of image topics; None means "analyze all topics".
    image_topics = arguments.image_topics if arguments.image_topics else None

    main(
        batch_metadata_table_name,
        batch_id,
        output_bucket,
        output_dynamo_table,
        spark,
        region,
        image_topics,
    )
    sc.stop()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/stack.py | modules/analysis/rosbag-image-pipeline/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, List, cast
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Duration, RemovalPolicy, Stack, Tags
from aws_cdk import aws_dynamodb as dynamo
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
# Module-level logger (not referenced elsewhere in this module as visible here).
_logger: logging.Logger = logging.getLogger(__name__)
class AwsBatchPipeline(Stack):
    """CDK stack for the rosbag image pipeline.

    Provisions:
      * a DynamoDB table tracking drives/files per processing batch,
      * an IAM role the MWAA (Airflow) execution role can assume so DAGs can
        drive Batch, SageMaker, DynamoDB, ECR and S3,
      * a security group for SageMaker processing jobs in the supplied VPC.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        vpc_id: str,
        mwaa_exec_role: str,
        bucket_access_policy: str,
        object_detection_role: str,
        lane_detection_role: str,
        job_queues: List[str],
        job_definitions: List[str],
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Create the pipeline stack.

        @param vpc_id: id of an existing VPC, looked up for the SageMaker SG
        @param mwaa_exec_role: ARN of the MWAA execution role allowed to assume the DAG role
        @param bucket_access_policy: managed-policy ARN attached to the DAG role
        @param object_detection_role: role ARN the DAG may iam:PassRole
        @param lane_detection_role: role ARN the DAG may iam:PassRole
        @param job_queues: AWS Batch job-queue ARNs the DAG may submit to
        @param job_definitions: AWS Batch job-definition ARNs the DAG may use
        """
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )

        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        self.vpc_id = vpc_id
        self.mwaa_exec_role = mwaa_exec_role
        self.bucket_access_policy = bucket_access_policy

        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment",
            value="aws",
        )

        # Shared naming prefix for all resources in this module deployment.
        dep_mod = f"{self.project_name}-{self.deployment_name}-{self.module_name}"

        # DYNAMODB TRACKING TABLE
        # Generic pk/sk so the table can track both batches and drives.
        self.tracking_table_name = f"{dep_mod}-drive-tracking"
        tracking_partition_key = "pk"  # batch_id or drive_id
        tracking_sort_key = "sk"  # batch_id / array_index_id or drive_id / file_part
        dynamo.Table(
            self,
            self.tracking_table_name,
            table_name=self.tracking_table_name,
            partition_key=dynamo.Attribute(name=tracking_partition_key, type=dynamo.AttributeType.STRING),
            sort_key=dynamo.Attribute(name=tracking_sort_key, type=dynamo.AttributeType.STRING),
            billing_mode=dynamo.BillingMode.PAY_PER_REQUEST,
            removal_policy=RemovalPolicy.DESTROY,
            point_in_time_recovery=True,
            stream=dynamo.StreamViewType.NEW_AND_OLD_IMAGES,
        )

        # Create Dag IAM Role and policy
        # Statements are scoped to this deployment's name prefix where the
        # service supports resource-level permissions.
        policy_statements = [
            iam.PolicyStatement(
                actions=["dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:dynamodb:{self.region}:{self.account}:table/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "batch:UntagResource",
                    "batch:DeregisterJobDefinition",
                    "batch:TerminateJob",
                    "batch:CancelJob",
                    "batch:SubmitJob",
                    "batch:RegisterJobDefinition",
                    "batch:TagResource",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    *job_queues,
                    *job_definitions,
                    f"arn:{self.partition}:batch:{self.region}:{self.account}:job/*",
                ],
            ),
            # Allow handing the detection roles to SageMaker jobs.
            iam.PolicyStatement(
                actions=[
                    "iam:PassRole",
                ],
                effect=iam.Effect.ALLOW,
                resources=[x for x in [object_detection_role, lane_detection_role] if x is not None],
            ),
            # Describe*/List* wildcards: resource-level scoping not applied here.
            iam.PolicyStatement(
                actions=[
                    "batch:Describe*",
                    "batch:List*",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    "*",
                ],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject", "s3:GetObjectAcl", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{project_name}-*",
                    f"arn:{self.partition}:s3:::{project_name}-*/*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)

        dag_role_name = f"{dep_mod}-dag-{self.region}"
        # Role assumed by the MWAA execution role; 12-hour max session.
        self.dag_role = iam.Role(
            self,
            f"dag-role-{dep_mod}",
            assumed_by=iam.CompositePrincipal(
                iam.ArnPrincipal(self.mwaa_exec_role),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_managed_policy_arn(
                    self, id="fullaccess", managed_policy_arn=self.bucket_access_policy
                ),
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSageMakerFullAccess"),
            ],
            role_name=dag_role_name,
            max_session_duration=Duration.hours(12),
            path="/",
        )

        # Sagemaker Security Group
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=self.vpc_id,
        )
        self.sm_sg = ec2.SecurityGroup(
            self,
            "SagemakerJobsSG",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Sagemaker Processing Jobs SG",
        )
        # Jobs within this SG may talk to each other on any port.
        self.sm_sg.add_ingress_rule(peer=self.sm_sg, connection=ec2.Port.all_traffic())

        # cdk-nag checks, with suppressions for the deliberate wildcards above.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())

        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/app.py | modules/analysis/rosbag-image-pipeline/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
from aws_cdk import App, CfnOutput, Environment
from stack import AwsBatchPipeline
# Identity of this SeedFarmer module deployment (set by the SeedFarmer CLI).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters supplied by SeedFarmer as environment variables.
dag_id = os.getenv(_param("DAG_ID"))
vpc_id = os.getenv(_param("VPC_ID"))
# JSON-encoded list of private subnet ids (raises if unset / invalid JSON).
private_subnet_ids = json.loads(os.getenv(_param("PRIVATE_SUBNET_IDS")))  # type: ignore
mwaa_exec_role = os.getenv(_param("MWAA_EXEC_ROLE"))
full_access_policy = os.getenv(_param("FULL_ACCESS_POLICY_ARN"))
source_bucket_name = os.getenv(_param("SOURCE_BUCKET"))
target_bucket_name = os.getenv(_param("INTERMEDIATE_BUCKET"))
dag_bucket_name = os.getenv(_param("DAG_BUCKET_NAME"))
log_bucket_name = os.getenv(_param("LOGS_BUCKET_NAME"))
detection_ddb_name = os.getenv(_param("ROSBAG_SCENE_METADATA_TABLE"))
# AWS Batch queues: at least one of the three must be set (validated below).
on_demand_job_queue = os.getenv(_param("ON_DEMAND_JOB_QUEUE_ARN"))
spot_job_queue = os.getenv(_param("SPOT_JOB_QUEUE_ARN"))
fargate_job_queue = os.getenv(_param("FARGATE_JOB_QUEUE_ARN"))
parquet_batch_job_def_arn = os.getenv(_param("PARQUET_BATCH_JOB_DEF_ARN"))
png_batch_job_def_arn = os.getenv(_param("PNG_BATCH_JOB_DEF_ARN"))
object_detection_image_uri = os.getenv(_param("OBJECT_DETECTION_IMAGE_URI"))
object_detection_role = os.getenv(_param("OBJECT_DETECTION_IAM_ROLE"))
object_detection_job_concurrency = int(os.getenv(_param("OBJECT_DETECTION_JOB_CONCURRENCY"), 10))
object_detection_instance_type = os.getenv(_param("OBJECT_DETECTION_INSTANCE_TYPE"), "ml.m5.xlarge")
lane_detection_image_uri = os.getenv(_param("LANE_DETECTION_IMAGE_URI"))
lane_detection_role = os.getenv(_param("LANE_DETECTION_IAM_ROLE"))
lane_detection_job_concurrency = int(os.getenv(_param("LANE_DETECTION_JOB_CONCURRENCY"), 5))
lane_detection_instance_type = os.getenv(_param("LANE_DETECTION_INSTANCE_TYPE"), "ml.p3.2xlarge")
file_suffix = os.getenv(_param("FILE_SUFFIX"), ".bag")
desired_encoding = os.getenv(_param("DESIRED_ENCODING"), "bgr8")
yolo_model = os.getenv(_param("YOLO_MODEL"), "yolo11s")
image_topics = os.getenv(_param("IMAGE_TOPICS"))
sensor_topics = os.getenv(_param("SENSOR_TOPICS"))

# Fail fast at import time on missing required parameters.
if not png_batch_job_def_arn:
    raise ValueError("missing input parameter png-batch-job-def-arn")
if not parquet_batch_job_def_arn:
    raise ValueError("missing input parameter parquet-batch-job-def-arn")
if not object_detection_role:
    raise ValueError("missing input parameter object-detection-iam-role")
if not object_detection_image_uri:
    raise ValueError("missing input parameter object-detection-image-uri")
if not lane_detection_role:
    raise ValueError("missing input parameter lane-detection-iam-role")
if not lane_detection_image_uri:
    raise ValueError("missing input parameter lane-detection-image-uri")
if not vpc_id:
    raise ValueError("missing input parameter vpc-id")
if not private_subnet_ids:
    raise ValueError("missing input parameter private-subnet-ids")
if not mwaa_exec_role:
    raise ValueError("MWAA Execution Role is missing.")
if not full_access_policy:
    raise ValueError("S3 Full Access Policy ARN is missing.")
if not on_demand_job_queue and not spot_job_queue and not fargate_job_queue:
    raise ValueError("Requires at least one job queue.")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Defaults to the ADDF module description; overridden when the optional
    solution id/name (and optionally version) parameters are supplied.
    """
    soln_id = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    soln_name = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    soln_version = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)

    if soln_id and soln_name and soln_version:
        return f"({soln_id}) {soln_name}. Version {soln_version}"
    if soln_id and soln_name:
        return f"({soln_id}) {soln_name}"
    return "(SO9154) Autonomous Driving Data Framework (ADDF) - rosbag-image-pipeline"
app = App()

stack = AwsBatchPipeline(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    vpc_id=vpc_id,
    mwaa_exec_role=mwaa_exec_role,
    bucket_access_policy=full_access_policy,
    object_detection_role=object_detection_role,
    lane_detection_role=lane_detection_role,
    # Drop any unset queue/definition parameters before passing them on.
    job_queues=[x for x in [fargate_job_queue, spot_job_queue, on_demand_job_queue] if x is not None],
    job_definitions=[x for x in [png_batch_job_def_arn, parquet_batch_job_def_arn] if x is not None],
    stack_description=generate_description(),
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)

# "metadata" output: the module's contract with SeedFarmer / downstream DAGs.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "PrivateSubnetIds": private_subnet_ids,
            "DagId": dag_id,
            "SecurityGroupId": stack.sm_sg.security_group_id,
            "DagRoleArn": stack.dag_role.role_arn,
            "DynamoDbTableName": stack.tracking_table_name,
            "DetectionsDynamoDBName": detection_ddb_name,
            "SourceBucketName": source_bucket_name,
            "TargetBucketName": target_bucket_name,
            "DagBucketName": dag_bucket_name,
            "LogsBucketName": log_bucket_name,
            "OnDemandJobQueueArn": on_demand_job_queue,
            "SpotJobQueueArn": spot_job_queue,
            "FargateJobQueueArn": fargate_job_queue,
            "ParquetBatchJobDefArn": parquet_batch_job_def_arn,
            "PngBatchJobDefArn": png_batch_job_def_arn,
            "ObjectDetectionImageUri": object_detection_image_uri,
            "ObjectDetectionRole": object_detection_role,
            "ObjectDetectionJobConcurrency": object_detection_job_concurrency,
            "ObjectDetectionInstanceType": object_detection_instance_type,
            "LaneDetectionImageUri": lane_detection_image_uri,
            "LaneDetectionRole": lane_detection_role,
            "LaneDetectionJobConcurrency": lane_detection_job_concurrency,
            "LaneDetectionInstanceType": lane_detection_instance_type,
            "FileSuffix": file_suffix,
            "DesiredEncoding": desired_encoding,
            "YoloModel": yolo_model,
            "ImageTopics": json.loads(image_topics),  # type: ignore
            "SensorTopics": json.loads(sensor_topics),  # type: ignore
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/test_app.py | modules/analysis/rosbag-image-pipeline/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
from json import JSONDecodeError
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Populate every required SeedFarmer/CDK env var with dummy values and
    drop any cached `app` module so each test re-imports it fresh."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_DAG_ID"] = "dag-id"
    os.environ["SEEDFARMER_PARAMETER_VPC_ID"] = "vpc-id"
    os.environ["SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS"] = '["subnet-12345", "subnet-54321"]'
    os.environ["SEEDFARMER_PARAMETER_MWAA_EXEC_ROLE"] = "mwaa-exec-role"
    os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"] = "full-access-policy-arn"
    os.environ["SEEDFARMER_PARAMETER_SOURCE_BUCKET"] = "source-bucket"
    os.environ["SEEDFARMER_PARAMETER_INTERMEDIATE_BUCKET"] = "intermediate-bucket"
    os.environ["SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN"] = "on-demand-job-queue-arn"
    os.environ["SEEDFARMER_PARAMETER_SPOT_JOB_QUEUE_ARN"] = "spot-job-queue-arn"
    os.environ["SEEDFARMER_PARAMETER_FARGATE_JOB_QUEUE_ARN"] = "fargate-job-queue-arn"
    os.environ["SEEDFARMER_PARAMETER_PARQUET_BATCH_JOB_DEF_ARN"] = "parquet-batch-job-def-arn"
    os.environ["SEEDFARMER_PARAMETER_PNG_BATCH_JOB_DEF_ARN"] = "png-batch-job-def-arn"
    os.environ["SEEDFARMER_PARAMETER_OBJECT_DETECTION_IMAGE_URI"] = "object-detection-image-uri"
    os.environ["SEEDFARMER_PARAMETER_OBJECT_DETECTION_IAM_ROLE"] = "object-detection-iam-role"
    os.environ["SEEDFARMER_PARAMETER_LANE_DETECTION_IMAGE_URI"] = "lane-detection-image-uri"
    os.environ["SEEDFARMER_PARAMETER_LANE_DETECTION_IAM_ROLE"] = "lane-detection-iam-role"
    os.environ["SEEDFARMER_PARAMETER_IMAGE_TOPICS"] = "{}"
    os.environ["SEEDFARMER_PARAMETER_SENSOR_TOPICS"] = "{}"

    # Unload the app import so that subsequent tests don't reuse
    # module-level state from a previous test's import.
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    """App synthesizes cleanly when every parameter is present."""
    import app  # noqa: F401


def test_png_batch_job_def_arn(stack_defaults):
    """Missing PNG batch job definition ARN fails the import."""
    del os.environ["SEEDFARMER_PARAMETER_PNG_BATCH_JOB_DEF_ARN"]

    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    assert "missing input parameter png-batch-job-def-arn" in str(e)


def test_parquet_batch_job_def_arn(stack_defaults):
    """Missing parquet batch job definition ARN fails the import."""
    del os.environ["SEEDFARMER_PARAMETER_PARQUET_BATCH_JOB_DEF_ARN"]

    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    assert "missing input parameter parquet-batch-job-def-arn" in str(e)


def test_object_detection_role(stack_defaults):
    """Missing object-detection IAM role fails the import."""
    del os.environ["SEEDFARMER_PARAMETER_OBJECT_DETECTION_IAM_ROLE"]

    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    assert "missing input parameter object-detection-iam-role" in str(e)
def test_object_detection_image_uri(stack_defaults):
    """Missing object-detection image URI fails the import.

    Bug fix: this test previously deleted LANE_DETECTION_IMAGE_URI (copy/paste
    error), exactly duplicating test_lane_detection_image_uri and leaving the
    object-detection parameter untested.
    """
    del os.environ["SEEDFARMER_PARAMETER_OBJECT_DETECTION_IMAGE_URI"]

    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    assert "missing input parameter object-detection-image-uri" in str(e)
def test_lane_detection_role(stack_defaults):
del os.environ["SEEDFARMER_PARAMETER_LANE_DETECTION_IAM_ROLE"]
with pytest.raises(Exception) as e:
import app # noqa: F401
assert "missing input parameter lane-detection-iam-role" in str(e)
def test_lane_detection_image_uri(stack_defaults):
    """Importing app without the lane-detection image URI must fail with a clear message."""
    os.environ.pop("SEEDFARMER_PARAMETER_LANE_DETECTION_IMAGE_URI")
    with pytest.raises(Exception) as exc_info:
        import app  # noqa: F401
    assert "missing input parameter lane-detection-image-uri" in str(exc_info)
def test_vpc_id(stack_defaults):
    """Importing app without a VPC id must fail with a clear message."""
    os.environ.pop("SEEDFARMER_PARAMETER_VPC_ID")
    with pytest.raises(Exception) as exc_info:
        import app  # noqa: F401
    assert "missing input parameter vpc-id" in str(exc_info)
def test_private_subnet_ids(stack_defaults):
    """Importing app without private subnet ids must fail with a clear message."""
    os.environ.pop("SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS")
    with pytest.raises(Exception) as exc_info:
        import app  # noqa: F401
    assert "missing input parameter private-subnet-ids" in str(exc_info)
def test_mwaa_exec_role(stack_defaults):
    """Importing app without the MWAA execution role must raise ValueError."""
    os.environ.pop("SEEDFARMER_PARAMETER_MWAA_EXEC_ROLE")
    with pytest.raises(ValueError) as exc_info:
        import app  # noqa: F401
    assert "MWAA Execution Role is missing." in str(exc_info)
def test_full_access_policy(stack_defaults):
    """Importing app without the S3 full-access policy ARN must raise ValueError."""
    os.environ.pop("SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN")
    with pytest.raises(ValueError) as exc_info:
        import app  # noqa: F401
    assert "S3 Full Access Policy ARN is missing." in str(exc_info)
def test_no_queue_provided(stack_defaults):
    """Importing app with no Batch job queue at all must raise ValueError.

    Fixes: (1) take the ``stack_defaults`` fixture like every other test so
    the environment is re-initialised and the cached ``app`` module is
    unloaded — previously this test silently depended on test order;
    (2) use ``pop(..., None)`` so a queue variable that is already absent
    does not raise KeyError before the behavior under test runs.
    """
    os.environ.pop("SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN", None)
    os.environ.pop("SEEDFARMER_PARAMETER_SPOT_JOB_QUEUE_ARN", None)
    os.environ.pop("SEEDFARMER_PARAMETER_FARGATE_JOB_QUEUE_ARN", None)
    with pytest.raises(ValueError) as e:
        import app  # noqa: F401
    assert "Requires at least one job queue." in str(e)
def test_image_topics_no_json(stack_defaults):
    """A non-JSON image-topics value must raise json.JSONDecodeError on import.

    Bug fix: ``JSONDecodeError`` was referenced without ever being imported
    (module-level imports are os/sys/aws_cdk/pytest/Template only), so this
    test failed with NameError instead of exercising the JSON validation.
    """
    from json import JSONDecodeError

    os.environ["SEEDFARMER_PARAMETER_IMAGE_TOPICS"] = "no json"
    with pytest.raises(JSONDecodeError):
        import app  # noqa: F401
def test_sensor_topics_no_json(stack_defaults):
    """A non-JSON sensor-topics value must raise json.JSONDecodeError on import.

    Bug fix: ``JSONDecodeError`` was referenced without ever being imported,
    so this test failed with NameError instead of exercising the validation.
    """
    from json import JSONDecodeError

    os.environ["SEEDFARMER_PARAMETER_SENSOR_TOPICS"] = "no json"
    with pytest.raises(JSONDecodeError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    """generate_description combines solution id, name and version."""
    solution_env = {
        "SEEDFARMER_PARAMETER_SOLUTION_ID": "SO123456",
        "SEEDFARMER_PARAMETER_SOLUTION_NAME": "MY GREAT TEST",
        "SEEDFARMER_PARAMETER_SOLUTION_VERSION": "v1.0.0",
    }
    os.environ.update(solution_env)
    import app

    assert app.generate_description() == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """generate_description omits the version suffix when SOLUTION_VERSION is unset.

    Bug fix: ``del`` of SEEDFARMER_PARAMETER_SOLUTION_VERSION only worked
    when this test happened to run after test_solution_description (which
    sets it) — the fixture never does. ``pop(..., None)`` removes the
    test-order coupling and the potential KeyError.
    """
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/test_stack.py | modules/analysis/rosbag-image-pipeline/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Pin the CDK account/region and force a fresh import of ``stack``."""
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Drop any cached module so each test imports a clean copy.
    sys.modules.pop("stack", None)
def iam_policy() -> dict:
    """Expected inline-policy properties for the pipeline's DAG role.

    Shaped for ``Template.has_resource_properties("AWS::IAM::Role", ...)`` so
    ``test_stack`` can assert the permissions of the synthesized role.
    """
    return {
        "Policies": [
            {
                "PolicyDocument": {
                    "Statement": [
                        {
                            # Module-scoped DynamoDB tracking table(s).
                            "Action": "dynamodb:*",
                            "Effect": "Allow",
                            "Resource": {'Fn::Join': ['', ['arn:', {'Ref': 'AWS::Partition'}, ':dynamodb:us-east-1:111111111111:table/test-project-test-deployment-test-module*']]}
                        },
                        {
                            # Module-scoped ECR repositories.
                            "Action": "ecr:*",
                            "Effect": "Allow",
                            "Resource": {'Fn::Join': ['', ['arn:', {'Ref': 'AWS::Partition'}, ':ecr:us-east-1:111111111111:repository/test-project-test-deployment-test-module*']]}
                        },
                        {
                            # Batch job management, limited to the stack's
                            # queues/definitions plus the account's jobs.
                            "Action": [
                                "batch:UntagResource",
                                "batch:DeregisterJobDefinition",
                                "batch:TerminateJob",
                                "batch:CancelJob",
                                "batch:SubmitJob",
                                "batch:RegisterJobDefinition",
                                "batch:TagResource",
                            ],
                            "Effect": "Allow",
                            "Resource": [
                                "job-queue-1",
                                "job-queue-2",
                                "job-def-1",
                                "job-def-2",
                                {'Fn::Join': ['', ['arn:', {'Ref': 'AWS::Partition'}, ':batch:us-east-1:111111111111:job/*']]}
                            ],
                        },
                        {
                            # The role must be able to pass the two inference
                            # roles to SageMaker/Batch.
                            "Action": "iam:PassRole",
                            "Effect": "Allow",
                            "Resource": [
                                "obj-detection-role",
                                "lane-detection-role",
                            ],
                        },
                        {
                            "Action": ["batch:Describe*", "batch:List*"],
                            "Effect": "Allow",
                            "Resource": "*",
                        },
                        {
                            # Read access to the project buckets.
                            "Action": [
                                "s3:GetObject",
                                "s3:GetObjectAcl",
                                "s3:ListBucket",
                            ],
                            "Effect": "Allow",
                            "Resource": [
                                {'Fn::Join': ['', ['arn:', {'Ref': 'AWS::Partition'}, ':s3:::test-project-*']]},
                                {'Fn::Join': ['', ['arn:', {'Ref': 'AWS::Partition'}, ':s3:::test-project-*/*']]}
                            ]
                        },
                    ]
                }
            }
        ],
    }
def test_stack(stack_defaults):
    """Synthesize AwsBatchPipeline and assert on the generated resources."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"

    rosbag_stack = stack.AwsBatchPipeline(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        vpc_id="vpc-id",
        mwaa_exec_role="mwaa-exec-role",
        bucket_access_policy="bucket-access-policy",
        object_detection_role="obj-detection-role",
        lane_detection_role="lane-detection-role",
        job_queues=["job-queue-1", "job-queue-2"],
        job_definitions=["job-def-1", "job-def-2"],
        stack_description="Testing",
        env=cdk.Environment(
            account=(os.environ["CDK_DEFAULT_ACCOUNT"]),
            region=(os.environ["CDK_DEFAULT_REGION"]),
        ),
    )
    template = Template.from_stack(rosbag_stack)

    # Exactly one drive-tracking table and one DAG role are expected.
    template.resource_count_is("AWS::DynamoDB::Table", 1)
    template.resource_count_is("AWS::IAM::Role", 1)
    # The role must be assumable by the MWAA execution role and carry the
    # bucket-access policy plus the SageMaker managed policy.
    template.has_resource_properties(
        "AWS::IAM::Role",
        {
            "AssumeRolePolicyDocument": {
                "Statement": [
                    {
                        "Action": "sts:AssumeRole",
                        "Effect": "Allow",
                        "Principal": {"AWS": "mwaa-exec-role"},
                    }
                ],
                "Version": "2012-10-17",
            },
            "ManagedPolicyArns": [
                "bucket-access-policy",
                {
                    "Fn::Join": [
                        "",
                        [
                            "arn:",
                            {"Ref": "AWS::Partition"},
                            ":iam::aws:policy/AmazonSageMakerFullAccess",
                        ],
                    ]
                },
            ],
        },
    )
    # Inline permissions are asserted separately (see iam_policy()).
    template.has_resource_properties("AWS::IAM::Role", iam_policy())
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/conftest.py | modules/analysis/rosbag-image-pipeline/tests/conftest.py | # conftest.py
import os
import random
import boto3
import moto
import pytest
from moto.server import ThreadedMotoServer
# Resolve the module directory whether pytest is launched from the repo root
# or from inside the module itself, then locate dag_config.py and the path
# used to back it up for the duration of the test session.
WD = os.getcwd()
MODULE_PATH = "modules/analysis/rosbag-image-pipeline"
if MODULE_PATH not in WD:
    WD = f"{WD}/{MODULE_PATH}"
DAG_CONFIG_PATH = f"{WD}/image_dags/dag_config.py"
DAG_CONFIG_BACKUP_PATH = f"{WD}/image_dags/dag_config.bak"
def pytest_configure(config):
    """
    Allows plugins and conftest files to perform initial configuration.
    This hook is called for every plugin and initial conftest
    file after command line options have been parsed.
    """
    # Move the real dag_config.py out of the way and replace it with a
    # deterministic fixture configuration for the whole test session
    # (pytest_unconfigure restores the original afterwards).
    os.rename(DAG_CONFIG_PATH, DAG_CONFIG_BACKUP_PATH)
    sample_data = """SEEDFARMER_MODULE_METADATA = '{"PrivateSubnetIds":["subnet-090a22976151932d7","subnet-0d0f12bd07e5ed4ea","subnet-011bad0900787e44e"],"DagId":"vsi_image_pipeline","SecurityGroupId":"sg-08460867be55fd219","DagRoleArn":"arn:aws:iam::1234567890:role/addf-aws-solutions-analysis-rip-dag-us-east-1","DynamoDbTableName":"addf-aws-solutions-analysis-rip-drive-tracking","DetectionsDynamoDBName":"addf-aws-solutions-core-metadata-storage-Rosbag-Scene-Metadata","SourceBucketName":"addf-aws-solutions-raw-bucket-074ff5b4","TargetBucketName":"addf-aws-solutions-intermediate-bucket-074ff5b4","DagBucketName":"addf-aws-solutions-artifacts-bucket-074ff5b4","LogsBucketName":"addf-aws-solutions-logs-bucket-074ff5b4","OnDemandJobQueueArn":"arn:aws:batch:us-east-1:1234567890:job-queue/addf-aws-solutions-core-batch-compute-OnDemandJobQueue","SpotJobQueueArn":"arn:aws:batch:us-east-1:1234567890:job-queue/addf-aws-solutions-core-batch-compute-SpotJobQueue","FargateJobQueueArn":"arn:aws:batch:us-east-1:1234567890:job-queue/addf-aws-solutions-core-batch-compute-FargateJobQueue","ParquetBatchJobDefArn":"arn:aws:batch:us-east-1:1234567890:job-definition/addf-aws-solutions-docker-images-ros-to-parquet:1","PngBatchJobDefArn":"arn:aws:batch:us-east-1:1234567890:job-definition/addf-aws-solutions-docker-images-ros-to-png:1","ObjectDetectionImageUri":"1234567890.dkr.ecr.us-east-1.amazonaws.com/addf-aws-solutions-docker-images-object-detection:latest","ObjectDetectionRole":"arn:aws:iam::1234567890:role/addf-aws-solutions-docker-addfawssolutionsdockerim-1WI5F9LEEAN39","ObjectDetectionJobConcurrency":30,"ObjectDetectionInstanceType":"ml.m5.xlarge","LaneDetectionImageUri":"1234567890.dkr.ecr.us-east-1.amazonaws.com/addf-aws-solutions-docker-images-lane-detection:smprocessor","LaneDetectionRole":"arn:aws:iam::1234567890:role/addf-aws-solutions-docker-addfawssolutionsdockerim-1U2OPJ0QGMLSM","LaneDetectionJobConcurrency":20,"LaneDetectionInstanceType":"ml.m5.2xlarge","FileSuffix":".bag","DesiredEncoding":"bgr8","YoloModel":"yolov5s","ImageTopics":["/flir_adk/rgb_front_left/image_raw","/flir_adk/rgb_front_right/image_raw"],"SensorTopics":["/vehicle/gps/fix","/vehicle/gps/time","/vehicle/gps/vel","/imu_raw"]}'
DEPLOYMENT_NAME = 'aws-solutions'
MODULE_NAME = 'analysis-rip'
REGION = 'us-east-1'
EMR_JOB_EXECUTION_ROLE = 'arn:aws:iam::1234567890:role/addf-aws-solutions-core-e-addfawssolutionscoreemrs-KNO49U56R3MO'
EMR_APPLICATION_ID = '00fcldsv5ol5dv09'
SOLUTION_ID = 'SO12345'
SOLUTION_NAME = 'Go ADDF!'
SOLUTION_VERSION = 'v1.0.0'
S3_SCRIPT_DIR = 's3://addf-aws-solutions-artifacts-bucket-074ff5b4/dags/aws-solutions/analysis-rip/image_dags/'
"""
    # Write the fixture config where the DAG modules expect dag_config.py.
    with open(DAG_CONFIG_PATH, "w") as outfile:
        outfile.write(sample_data)
def pytest_unconfigure(config):
    """
    called before test process is exited.
    """
    # Discard the generated fixture config and restore the backed-up original.
    os.remove(DAG_CONFIG_PATH)
    os.rename(DAG_CONFIG_BACKUP_PATH, DAG_CONFIG_PATH)
@pytest.fixture(scope="function")
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    fake_env = {
        "AWS_ACCESS_KEY_ID": "testing",
        "AWS_SECRET_ACCESS_KEY": "testing",
        "AWS_SECURITY_TOKEN": "testing",
        "AWS_SESSION_TOKEN": "testing",
        "AWS_DEFAULT_REGION": "us-west-2",
        "MOTO_ACCOUNT_ID": "123456789012",
    }
    os.environ.update(fake_env)
@pytest.fixture(scope="function")
def moto_dynamodb(aws_credentials):
    """Yield a mocked DynamoDB resource with a pre-created ``mytable``."""
    with moto.mock_dynamodb():
        resource = boto3.resource("dynamodb")
        resource.create_table(
            TableName="mytable",
            KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
            AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "N"}],
            BillingMode="PAY_PER_REQUEST",
        )
        yield resource
@pytest.fixture(scope="function")
def moto_s3(aws_credentials):
    """Yield a mocked S3 client; best-effort create ``mybucket`` up front."""
    with moto.mock_s3():
        client = boto3.client("s3")
        try:
            client.create_bucket(
                Bucket="mybucket",
                CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
            )
        except Exception as err:
            print(f"bucket creation failed: {err}")
        yield client
@pytest.fixture(scope="function")
def moto_server():
    """Run a ThreadedMotoServer on a random local port for one test."""
    chosen_port = random.randint(5001, 8999)
    server = ThreadedMotoServer(port=chosen_port)
    server.start()
    yield chosen_port
    server.stop()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/__init__.py | modules/analysis/rosbag-image-pipeline/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/test_detect_scenes.py | modules/analysis/rosbag-image-pipeline/tests/test_detect_scenes.py | import os
from image_dags.detect_scenes import *
def create_spark_session(port: int):
    """Build a SparkSession whose S3A filesystem talks to a local moto server."""
    os.environ["PYSPARK_SUBMIT_ARGS"] = '--packages "org.apache.hadoop:hadoop-aws:3.3.1" pyspark-shell'
    session = SparkSession.builder.getOrCreate()
    conf = session.sparkContext._jsc.hadoopConfiguration()
    # Redirect every s3:// access to the moto endpoint with dummy credentials.
    for key, value in [
        ("fs.s3a.access.key", "dummy-value"),
        ("fs.s3a.secret.key", "dummy-value"),
        ("fs.s3a.endpoint", f"http://127.0.0.1:{port}"),
        ("fs.s3.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem"),
    ]:
        conf.set(key, value)
    return session
def test_get_batch_file_metadata(moto_dynamodb):
    """Each item written to the tracking table is retrievable by its pk."""
    table = boto3.resource("dynamodb").Table("mytable")
    seeded = [{"pk": 1}, {"pk": 2}]
    for record in seeded:
        table.put_item(Item=record)
    region = os.getenv("AWS_DEFAULT_REGION")
    for record in seeded:
        assert len(get_batch_file_metadata("mytable", record["pk"], region)) > 0
def test_detect_scenes_parse_arguments():
    """parse_arguments should round-trip every CLI flag unchanged."""
    topics = '["/flir_adk/rgb_front_left/image_raw", "/flir_adk/rgb_front_right/image_raw"]'
    args = parse_arguments(
        [
            "--batch-metadata-table-name", "dummy",
            "--batch-id", "dummy",
            "--input-bucket", "mybucket",
            "--output-bucket", "mybucket",
            "--output-dynamo-table", "mytable",
            "--region", "us-east-1",
            "--image-topics", topics,
        ]
    )
    expected = {
        "batch_metadata_table_name": "dummy",
        "batch_id": "dummy",
        "input_bucket": "mybucket",
        "output_bucket": "mybucket",
        "output_dynamo_table": "mytable",
        "region": "us-east-1",
        "image_topics": topics,
    }
    for attr, value in expected.items():
        assert getattr(args, attr) == value
def test_load_lane_detection(moto_server):
    """Smoke test: load_lane_detection reads a seeded lanes.csv through S3A."""
    port = moto_server
    s3 = boto3.resource("s3", endpoint_url=f"http://127.0.0.1:{port}")
    # create an S3 bucket.
    bucket_name = "lane-detection-bucket"
    s3.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": os.getenv("AWS_DEFAULT_REGION")},
    )
    # Seed the lanes.csv that the loader derives from resized_image_dirs[0].
    object = s3.Object(
        bucket_name,
        "test-vehicle-02/small2__2020-11-19-16-21-22_4/_flir_adk_rgb_front_left_image_raw_resized_1280_720_post_lane_dets/lanes.csv",
    )
    data = b"lanes"
    object.put(Body=data)
    spark = create_spark_session(port=port)
    sample_metadata = [
        {
            "raw_image_bucket": bucket_name,
            "drive_id": "test-vehichle-01",
            "file_id": "this.jpg",
            "s3_bucket": bucket_name,
            "s3_key": "test-vehichle-01/this/_flir_adk_rgb_front_right_image_raw_resized_/1280_720_post_lane_dets/lanes.csv",
            "resized_image_dirs": [
                "test-vehicle-02/small2__2020-11-19-16-21-22_4/_flir_adk_rgb_front_left_image_raw_resized_1280_720",
                "test-vehicle-02/small2__2020-11-19-16-21-22_4/_flir_adk_rgb_front_right_image_raw_resized_1280_720",
            ],
        }
    ]
    # Passes if the read completes without raising; no assertions on content.
    load_lane_detection(spark, sample_metadata)
    spark.stop()
def test_load_obj_detection(moto_server):
    """Smoke test: load_obj_detection reads a seeded all_predictions.csv through S3A."""
    port = moto_server
    s3 = boto3.resource("s3", endpoint_url=f"http://127.0.0.1:{port}")
    s3.create_bucket(
        Bucket="mybucket2",
        CreateBucketConfiguration={"LocationConstraint": os.getenv("AWS_DEFAULT_REGION")},
    )
    # Seed the prediction CSV the loader builds the path for.
    object = s3.Object(
        "mybucket2",
        "test-vehicle-02/small2__2020-11-19-16-21-22_4/_flir_adk_rgb_front_left_image_raw_resized_1280_720_post_obj_dets/all_predictions.csv",
    )
    data = b"all_predictions"
    object.put(Body=data)
    spark = create_spark_session(port=port)
    sample_metadata = [
        {
            "raw_image_bucket": "mybucket2",
            "drive_id": "test-vehichle-01",
            "file_id": "this.jpg",
            "resized_image_dirs": [
                "test-vehicle-02/small2__2020-11-19-16-21-22_4/_flir_adk_rgb_front_left_image_raw_resized_1280_720",
            ],
        }
    ]
    # image_topics=None exercises the "load everything" branch.
    load_obj_detection(spark, sample_metadata, None)
    spark.stop()
def test_write_results_to_s3(moto_server):
    """Smoke test: write_results_s3 writes partitioned parquet to a moto bucket."""
    port = moto_server
    s3 = boto3.resource("s3", endpoint_url=f"http://127.0.0.1:{port}")
    s3.create_bucket(
        Bucket="outputbucket",
        CreateBucketConfiguration={"LocationConstraint": os.getenv("AWS_DEFAULT_REGION")},
    )
    spark = create_spark_session(port=port)
    # Minimal two-row frame with the partition column the writer expects.
    df = spark.createDataFrame(
        [
            (1, "foo"),
            (2, "bar"),
        ],
        ["id", "bag_file"],
    )
    dfs = {"test": df}
    write_results_s3(
        dfs,
        table_name="scene_detections",
        output_bucket="outputbucket",
        partition_cols=["bag_file"],
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/test_batch_creation_and_tracking.py | modules/analysis/rosbag-image-pipeline/tests/test_batch_creation_and_tracking.py | from image_dags.batch_creation_and_tracking import *
def test_get_batch_file_metadata(moto_dynamodb):
    # NOTE(review): despite its name, this test exercises
    # batch_write_files_to_dynamo, not get_batch_file_metadata, and only
    # checks that the write path completes without raising — consider
    # renaming and adding a read-back assertion.
    drives_and_files = {"foo": {"files": ["dummyfile"], "bucket": "dummybucket"}}
    batch_write_files_to_dynamo(
        table=moto_dynamodb.Table("mytable"),
        drives_and_files=drives_and_files,
        batch_id=1010,
    )
def test_get_drive_files(moto_s3):
    """Listing an empty prefix should complete without raising."""
    get_drive_files(
        s3_client=moto_s3,
        src_bucket="mybucket",
        src_prefix="files/",
        file_suffix=".bag",
    )
def test_add_drives_to_batch(moto_dynamodb, moto_s3):
    """Recording an empty batch of drives should complete without raising."""
    tracking_table = moto_dynamodb.Table("mytable")
    drive_config = {
        "drive1": {"bucket": "mybucket", "prefix": "rosbag-scene-detection/drive1/"},
        "drive2": {"bucket": "mybucket", "prefix": "rosbag-scene-detection/drive2/"},
    }
    add_drives_to_batch(
        table=tracking_table,
        batch_id=1010,
        drives_to_process=drive_config,
        file_suffix=".bag",
        s3_client=moto_s3,
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/tests/test_ros_image_pipeline.py | modules/analysis/rosbag-image-pipeline/tests/test_ros_image_pipeline.py | from image_dags.ros_image_pipeline import *
def test_validate_config():
    """A well-formed drive configuration should pass validation."""
    # Renamed from `input`, which shadowed the builtin.
    config = {
        "drive2": {
            "bucket": "addf-ros-image-demo-raw-bucket-d2be7d29",
            "prefix": "rosbag-scene-detection/drive2/",
        },
    }
    validate_config(config)
def test_get_job_name():
    """Job names are prefixed with the pipeline name and the given suffix."""
    assert get_job_name("foobar").startswith("ros-image-pipeline-foobar-")
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/image_dags/dag_config.py | modules/analysis/rosbag-image-pipeline/image_dags/dag_config.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# This file is populated with configurations information when the Module is deployed
# Configuration parameters are exported as module level constants
#
# Example:
# SOME_PARAMETER = 'some value'
# DUMPS PARAMETERS FROM app.py IN ../deployspec.yaml like:
# - echo "DEPLOYMENT_NAME = '${SEEDFARMER_DEPLOYMENT_NAME}'" >> image_dags/dag_config.py
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/image_dags/detect_scenes.py | modules/analysis/rosbag-image-pipeline/image_dags/detect_scenes.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import sys
from typing import Any, Dict, List
import boto3
import pyspark.sql.functions as func
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import (
aggregate,
col,
concat,
count,
first,
from_json,
lit,
split,
)
from pyspark.sql.types import ArrayType, DoubleType, IntegerType, StringType, StructField, StructType
# Schema of the object-detection prediction CSVs produced upstream:
# one row per detected bounding box.
obj_schema = StructType(
    [
        StructField("_c0", IntegerType(), True),  # unnamed CSV index column
        StructField("xmin", DoubleType(), True),
        StructField("ymin", DoubleType(), True),
        StructField("xmax", DoubleType(), True),
        StructField("ymax", DoubleType(), True),
        StructField("confidence", DoubleType(), True),
        StructField("class", IntegerType(), True),
        StructField("name", StringType(), True),  # human-readable class label
        StructField("source_image", StringType(), True),  # image the box came from
    ]
)
def parse_arguments(args):
    """Parse the scene-detection job's command-line arguments."""
    parser = argparse.ArgumentParser()
    # (flag, required) in registration order.
    for flag, is_required in [
        ("--batch-metadata-table-name", True),
        ("--batch-id", True),
        ("--input-bucket", False),
        ("--output-bucket", True),
        ("--output-dynamo-table", True),
        ("--region", True),
        ("--image-topics", False),
    ]:
        parser.add_argument(flag, required=is_required)
    return parser.parse_args(args=args)
def form_object_in_lane_df(obj_df, lane_df):
    """Join detections with lane masks and keep only boxes overlapping a lane.

    Joins on ``source_image``, looks up the lane pixel-intensity row at each
    box's bottom edge (``ymax``), drops boxes whose bottom-edge row sums to
    zero (no lane pixels under the object), and derives a ``Time`` column
    from the source-image file name.
    """
    obj_lane_df = obj_df.join(lane_df, on="source_image", how="inner")
    obj_lane_df = (
        obj_lane_df.withColumn(
            "pixel_rows_at_bottom_corner",
            # lanes[int(ymax)]: the lane-mask row at the box's bottom edge.
            obj_lane_df["lanes"].getItem(obj_lane_df.ymax.cast(IntegerType())),
        )
        .drop("lanes")
        .drop("ymax")
    )
    obj_lane_df = obj_lane_df.withColumn(
        "sum_of_pixels_intensities_at_bottom_corner_rows",
        aggregate("pixel_rows_at_bottom_corner", lit(0), lambda acc, x: acc + x),
    ).drop("pixel_rows_at_bottom_corner")
    # A zero sum means no lane pixels under the object's bottom edge.
    obj_lane_df = obj_lane_df.filter(obj_lane_df.sum_of_pixels_intensities_at_bottom_corner_rows != 0)
    obj_lane_df = obj_lane_df.withColumn("source_image_split", split("source_image", "_"))
    # Assumes underscore-delimited source_image names where fields 2 and 3
    # carry seconds and sub-seconds -- TODO confirm against the producer.
    obj_lane_df = obj_lane_df.withColumn(
        "Time",
        concat(
            obj_lane_df["source_image_split"].getItem(2),
            lit("."),
            obj_lane_df["source_image_split"].getItem(3).substr(1, 2),
        ).cast(DoubleType()),
    )
    return obj_lane_df
def get_batch_file_metadata(table_name, batch_id, region):
    """Fetch all DynamoDB items for one batch, following query pagination.

    @param table_name: DynamoDB tracking-table name
    @param batch_id: partition key (pk) of the batch
    @param region: AWS region of the table
    @return: list of item dicts
    """
    dynamodb = boto3.resource("dynamodb", region_name=region)
    table = dynamodb.Table(table_name)
    key_conditions = {"pk": {"AttributeValueList": [batch_id], "ComparisonOperator": "EQ"}}
    response = table.query(KeyConditions=key_conditions)
    data = response["Items"]
    # Bug fixes: `data` is a list, so use extend() -- the previous update()
    # call raised AttributeError on any second page -- and repeat the
    # KeyConditions, which every paginated query call requires.
    while "LastEvaluatedKey" in response:
        response = table.query(
            KeyConditions=key_conditions,
            ExclusiveStartKey=response["LastEvaluatedKey"],
        )
        data.extend(response["Items"])
    return data
def load_obj_detection(spark, batch_metadata, image_topics):
    """Read the object-detection prediction CSVs for a batch into one DataFrame.

    @param spark: active SparkSession
    @param batch_metadata: tracking-table items, each carrying
        ``raw_image_bucket`` and ``resized_image_dirs``
    @param image_topics: optional list of sanitized topic names; when given,
        only directories containing one of the topics are loaded
    @return: DataFrame of predictions, with spaces in class names replaced by "_"
    """
    path_list = []
    for item in batch_metadata:
        for resizied_image_dir in item["resized_image_dirs"]:
            csv_path = f"s3://{item['raw_image_bucket']}/{resizied_image_dir}_post_obj_dets/all_predictions.csv"
            if image_topics and len(image_topics) > 0:
                # Idiom/bug fix: the old code abused a conditional expression
                # (`append(...) if t in dir else None`) inside a loop over
                # topics, which also appended the same path once per matching
                # topic; any() appends each path at most once.
                if any(topic in resizied_image_dir for topic in image_topics):
                    path_list.append(csv_path)
            else:
                path_list.append(csv_path)

    def remove_space(name):
        # Class labels like "traffic light" become column-safe "traffic_light".
        return name.replace(" ", "_")

    remove_space_udf = func.udf(remove_space, StringType())

    df = spark.read.schema(obj_schema).option("header", True).csv(path_list)
    df2 = df.withColumn("name", remove_space_udf(df.name))
    print("Number of rows in Object Detection dataframe")
    print(df2.count())
    return df2
def load_lane_detection(spark, batch_metadata):
    """Read lane-detection CSVs for the batch into one DataFrame.

    Each row is tagged with the originating bag file's bucket, name and
    prefix, and the ``lanes`` column is parsed into an int matrix.
    """
    first_item = batch_metadata[0]
    # NOTE(review): only resized_image_dirs[0] of the first item is loaded
    # here, while later items load every dir -- confirm this is intentional.
    first_path_prefix = first_item["resized_image_dirs"][0]
    first_path = f"s3://{first_item['raw_image_bucket']}/{first_path_prefix}_post_lane_dets/lanes.csv"
    first_item_split = first_item["s3_key"].rpartition("/")
    bag_file_prefix = first_item_split[0]
    bag_file = first_item_split[2].split(".")[0]
    df = (
        spark.read.option("header", True)
        .csv(first_path)
        .withColumn("bag_file_bucket", lit(first_item["s3_bucket"]))
        .withColumn("bag_file", lit(bag_file))
        .withColumn("bag_file_prefix", lit(bag_file_prefix))
    )
    for i in range(1, len(batch_metadata)):
        item = batch_metadata[i]
        for resizied_image_dir in item["resized_image_dirs"]:
            path = f"s3://{item['raw_image_bucket']}/{resizied_image_dir}_post_lane_dets/lanes.csv"
            item_split = item["s3_key"].rpartition("/")
            bag_file_prefix = item_split[0]
            bag_file = item_split[2].split(".")[0]
            # Bug fix: DataFrame.union returns a NEW DataFrame; the result
            # was previously discarded, so only the first file's rows ever
            # made it into the output.
            df = df.union(
                spark.read.option("header", True)
                .csv(path)
                .withColumn("bag_file_bucket", lit(item["s3_bucket"]))
                .withColumn("bag_file", lit(bag_file))
                .withColumn("bag_file_prefix", lit(bag_file_prefix))
            )
    lane_schema = ArrayType(ArrayType(IntegerType()), False)
    df = df.withColumn("lanes", from_json(col("lanes"), lane_schema))
    return df
def write_results_s3(dfs: Dict[str, Any], table_name: str, output_bucket: str, partition_cols=None):
    """Append each scene DataFrame to S3 as (optionally partitioned) parquet.

    @param dfs: mapping of scene/table key -> DataFrame
    @param table_name: top-level S3 "table" directory
    @param output_bucket: destination bucket
    @param partition_cols: optional partition column names

    Bug fix: ``partition_cols`` previously defaulted to a mutable list
    shared across calls; default to None and normalize inside instead.
    """
    partition_cols = partition_cols if partition_cols is not None else []
    for key, df in dfs.items():
        s3_path = f"s3://{output_bucket}/{table_name}/{key}"
        df.write.mode("append").partitionBy(*partition_cols).parquet(s3_path)
def write_results_dynamo(dfs, output_dynamo_table, region):
    """Append each scene DataFrame to the output DynamoDB table."""
    for scene_key, scene_df in dfs.items():
        print(f"Writing {scene_key} to DDB")
        writer = scene_df.write.mode("append").format("dynamodb")
        writer.option("tableName", output_dynamo_table).option("region", region).save()
def summarize_obj_in_lane_scenes(obj_lane_df, image_topics, obj_type):
    """Collapse per-frame in-lane detections of ``obj_type`` into scene rows.

    A scene starts when the in-lane count becomes non-zero and ends when it
    returns to zero; output rows carry start/end time, scene length, a
    synthetic scene_id, and the comma-joined list of topics analyzed.
    """
    obj_lane_df = obj_lane_df.filter(obj_lane_df.name == obj_type)
    # Per-frame count of this object type in lane.
    obj_lane_df = obj_lane_df.groupby("source_image").agg(
        count("name").alias(f"num_{obj_type}_in_lane"),
        first("Time").alias("Time"),
        first("bag_file_bucket").alias("bag_file_bucket"),
        first("bag_file").alias("bag_file"),
        first("bag_file_prefix").alias("bag_file_prefix"),
    )
    win = Window.orderBy("Time").partitionBy("bag_file_bucket", "bag_file", "bag_file_prefix")
    # Keep only frames at a transition boundary (previous or next count is 0).
    obj_lane_df = (
        obj_lane_df.withColumn(
            f"num_{obj_type}_in_lane_lag1",
            func.lag(func.col(f"num_{obj_type}_in_lane"), 1, 0).over(win),
        )
        .withColumn(
            f"num_{obj_type}_in_lane_lead1",
            func.lead(func.col(f"num_{obj_type}_in_lane"), 1, 0).over(win),
        )
        .filter(f"num_{obj_type}_in_lane_lag1 == 0 or num_{obj_type}_in_lane_lead1 ==0")
    )
    # "start" when objects appear after none; "end" when they disappear.
    scene_state_udf = func.udf(
        lambda num, lag: "start" if num > 0 and lag is None else ("end" if num == 0 and lag > 0 else None),
        StringType(),
    )
    obj_in_lane_scenes_df = (
        obj_lane_df.withColumn(
            "scene_state",
            scene_state_udf(
                obj_lane_df[f"num_{obj_type}_in_lane"],
                obj_lane_df[f"num_{obj_type}_in_lane_lag1"],
            ),
        )
        # The next boundary frame's time is this scene's end.
        .withColumn("end_time", func.lead(func.col("Time"), 1).over(win))
        .filter(f"num_{obj_type}_in_lane_lag1 ==0")
        .withColumnRenamed("Time", "start_time")
        .withColumnRenamed(f"num_{obj_type}_in_lane", f"num_{obj_type}_in_lane_start")
        .select(
            "bag_file",
            "bag_file_bucket",
            "bag_file_prefix",
            "start_time",
            "end_time",
            f"num_{obj_type}_in_lane_start",
        )
        .withColumn(
            "scene_id",
            func.concat(
                func.col("bag_file"),
                func.lit(f"_{obj_type}InLane_"),
                func.col("start_time"),
            ),
        )
        .withColumn("scene_length", func.col("end_time") - func.col("start_time"))
        .withColumn("topics_analyzed", func.lit(",".join(image_topics)))
    )
    return obj_in_lane_scenes_df
def main(
    batch_metadata_table_name,
    batch_id,
    output_bucket,
    output_dynamo_table,
    spark,
    region,
    image_topics,
):
    """Run scene detection for one batch and persist results to S3 and DynamoDB.

    @param batch_metadata_table_name: DynamoDB tracking table with batch items
    @param batch_id: batch (dag run) id to process
    @param output_bucket: S3 bucket for parquet scene tables
    @param output_dynamo_table: DynamoDB table receiving scene rows
    @param spark: active SparkSession
    @param region: AWS region
    @param image_topics: optional JSON-encoded list of image topic names
    """
    # Load files to process
    batch_metadata = get_batch_file_metadata(table_name=batch_metadata_table_name, batch_id=batch_id, region=region)
    if image_topics:
        image_topics = json.loads(image_topics)
        # Sanitize "/" -> "_" so topics match the S3 directory naming.
        # NOTE(review): the trailing `if image_topics` filter is always true
        # inside this branch and looks redundant.
        image_topics = [topic.replace("/", "_") for topic in image_topics if image_topics]
    # Load topic data from s3 and union
    obj_df = load_obj_detection(spark, batch_metadata=batch_metadata, image_topics=image_topics)
    lane_df = load_lane_detection(
        spark,
        batch_metadata=batch_metadata,
    )
    obj_lane_df = form_object_in_lane_df(obj_df, lane_df)
    # One scene table per object class of interest.
    dfs = {}
    dfs["car"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "car")
    dfs["truck"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "truck")
    dfs["traffic_light"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "traffic_light")
    dfs["train"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "train")
    dfs["bus"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "bus")
    dfs["motorcycle"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "motorcycle")
    dfs["stop_sign"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "stop_sign")
    dfs["fire_hydrant"] = summarize_obj_in_lane_scenes(obj_lane_df, image_topics, "fire_hydrant")
    write_results_s3(
        dfs,
        table_name="scene_detections",
        output_bucket=output_bucket,
        partition_cols=["bag_file"],
    )
    write_results_dynamo(dfs, output_dynamo_table, region)
if __name__ == "__main__":
    # spark-submit / EMR Serverless entrypoint: parse CLI args and run main().
    spark = SparkSession.builder.appName("scene-detection").getOrCreate()
    sc = spark.sparkContext
    arguments = parse_arguments(sys.argv[1:])
    batch_metadata_table_name = arguments.batch_metadata_table_name
    batch_id = arguments.batch_id
    output_bucket = arguments.output_bucket
    output_dynamo_table = arguments.output_dynamo_table
    region = arguments.region
    # Optional flag: empty/missing becomes None ("process all topics").
    image_topics = arguments.image_topics if arguments.image_topics else None
    main(
        batch_metadata_table_name,
        batch_id,
        output_bucket,
        output_dynamo_table,
        spark,
        region,
        image_topics,
    )
    sc.stop()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/image_dags/batch_creation_and_tracking.py | modules/analysis/rosbag-image-pipeline/image_dags/batch_creation_and_tracking.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import typing
logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
def add_drives_to_batch(
    table: str,
    batch_id: str,
    drives_to_process: typing.Dict[str, dict],
    file_suffix: str,
    s3_client,
):
    """List every file with ``file_suffix`` under each drive prefix and record
    them in DynamoDB for batch tracking.

    @param table: dynamo tracking table
    @param batch_id: dag run id
    @param drives_to_process: mapping of drive id -> {"bucket": ..., "prefix": ...}
    @param file_suffix: e.g. ".bag"
    @param s3_client: boto3.client('s3')
    @return: number of files recorded for the batch
    """
    collected = {}
    total_files = 0
    for drive_id, location in drives_to_process.items():
        drive_files = get_drive_files(
            src_bucket=location["bucket"],
            src_prefix=location["prefix"],
            file_suffix=file_suffix,
            s3_client=s3_client,
        )
        collected[drive_id] = {"files": drive_files, "bucket": location["bucket"]}
        total_files += len(drive_files)
    logger.info(f"files_in_batch {total_files}")
    batch_write_files_to_dynamo(table, collected, batch_id)
    return total_files
def get_drive_files(src_bucket, src_prefix, file_suffix, s3_client):
    """List keys under ``src_prefix`` ending with ``file_suffix``, following
    list_objects_v2 pagination (non-recursive: Delimiter="/").

    @param src_bucket: source bucket name
    @param src_prefix: key prefix to list
    @param file_suffix: required key suffix, e.g. ".bag"
    @param s3_client: boto3.client('s3')
    @return: list of matching keys
    """
    MAX_KEYS = 1000
    logger.info(src_bucket)
    logger.info(src_prefix)
    file_response = s3_client.list_objects_v2(Bucket=src_bucket, Prefix=src_prefix, MaxKeys=MAX_KEYS, Delimiter="/")
    logger.info(file_response)
    file_next_continuation = file_response.get("NextContinuationToken")
    files = [x["Key"] for x in file_response.get("Contents", []) if x["Key"].endswith(file_suffix)]
    while file_next_continuation is not None:
        file_response = s3_client.list_objects_v2(
            Bucket=src_bucket,
            Prefix=src_prefix,
            MaxKeys=MAX_KEYS,
            Delimiter="/",
            ContinuationToken=file_next_continuation,
        )
        file_next_continuation = file_response.get("NextContinuationToken")
        # Bug fix: continuation pages previously skipped the file_suffix
        # filter, so prefixes with >1000 keys picked up non-matching files.
        files += [x["Key"] for x in file_response.get("Contents", []) if x["Key"].endswith(file_suffix)]
    logger.info(files)
    return files
def batch_write_files_to_dynamo(table, drives_and_files, batch_id):
    """Write one tracking item per drive file via a DynamoDB batch writer.

    Items are keyed pk=batch_id, sk=running index across all drives.
    """
    next_index = 0
    with table.batch_writer() as writer:
        for drive_id, drive_info in drives_and_files.items():
            for key in drive_info["files"]:
                record = {
                    "drive_id": drive_id,
                    "file_id": key.split("/")[-1],
                    "s3_bucket": drive_info["bucket"],
                    "s3_key": key,
                    "pk": batch_id,
                    "sk": str(next_index),
                }
                logger.info(record)
                writer.put_item(Item=record)
                next_index += 1
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/image_dags/ros_image_pipeline.py | modules/analysis/rosbag-image-pipeline/image_dags/ros_image_pipeline.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
import random
import string
import sys
import time
from datetime import timedelta
from math import ceil
from typing import TypeVar
import boto3
import botocore
from airflow import DAG, settings
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.operators.python import PythonOperator, get_current_context
from airflow.providers.amazon.aws.operators.batch import BatchOperator
from airflow.providers.amazon.aws.operators.emr import EmrServerlessStartJobOperator
from airflow.providers.amazon.aws.sensors.emr import EmrServerlessJobSensor
from airflow.utils.dates import days_ago
from airflow.utils.task_group import TaskGroup
from boto3.dynamodb.conditions import Key
from sagemaker.network import NetworkConfig
from sagemaker.processing import ProcessingInput, ProcessingOutput, Processor
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from batch_creation_and_tracking import add_drives_to_batch
from dag_config import (
EMR_APPLICATION_ID,
EMR_JOB_EXECUTION_ROLE,
REGION,
S3_SCRIPT_DIR,
SEEDFARMER_MODULE_METADATA,
SOLUTION_ID,
SOLUTION_VERSION,
)
# GET MODULE VARIABLES FROM APP.PY AND DEPLOYSPEC
# SEEDFARMER_MODULE_METADATA is a JSON blob injected at deploy time.
module_metadata = json.loads(SEEDFARMER_MODULE_METADATA)
# --- DAG identity and batch tracking ---
DAG_ID = module_metadata["DagId"]
DAG_ROLE = module_metadata["DagRoleArn"]
DYNAMODB_TABLE = module_metadata["DynamoDbTableName"]
# --- AWS Batch queues and job definitions ---
FARGATE_JOB_QUEUE_ARN = module_metadata["FargateJobQueueArn"]
ON_DEMAND_JOB_QUEUE_ARN = module_metadata["OnDemandJobQueueArn"]
SPOT_JOB_QUEUE_ARN = module_metadata["SpotJobQueueArn"]
TARGET_BUCKET = module_metadata["TargetBucketName"]
FILE_SUFFIX = module_metadata["FileSuffix"]
PRIVATE_SUBNETS_IDS = module_metadata["PrivateSubnetIds"]
SM_SECURITY_GROUP_ID = module_metadata["SecurityGroupId"]
PNG_JOB_DEFINITION_ARN = module_metadata["PngBatchJobDefArn"]
DESIRED_ENCODING = module_metadata["DesiredEncoding"]
IMAGE_TOPICS = module_metadata["ImageTopics"]
PARQUET_JOB_DEFINITION_ARN = module_metadata["ParquetBatchJobDefArn"]
SENSOR_TOPICS = module_metadata["SensorTopics"]
# --- SageMaker object-detection (YOLO) processing config ---
YOLO_IMAGE_URI = module_metadata["ObjectDetectionImageUri"]
YOLO_ROLE = module_metadata["ObjectDetectionRole"]
YOLO_CONCURRENCY = module_metadata["ObjectDetectionJobConcurrency"]
YOLO_INSTANCE_TYPE = module_metadata["ObjectDetectionInstanceType"]
YOLO_MODEL = module_metadata["YoloModel"]
# --- SageMaker lane-detection processing config ---
LANEDET_IMAGE_URI = module_metadata["LaneDetectionImageUri"]
LANEDET_ROLE = module_metadata["LaneDetectionRole"]
LANEDET_CONCURRENCY = module_metadata["LaneDetectionJobConcurrency"]
LANEDET_INSTANCE_TYPE = module_metadata["LaneDetectionInstanceType"]
# EMR Config
# spark_app_dir = f"s3://{module_metadata['DagBucketName']}/spark_jobs/"
# EMR_VIRTUAL_CLUSTER_ID = module_metadata['EmrVirtualClusterId']
# EMR_JOB_ROLE_ARN = module_metadata['EmrJobRoleArn']
# ARTIFACT_BUCKET = module_metadata["DagBucketName"]
LOGS_BUCKET = module_metadata["LogsBucketName"]
SCENE_TABLE = module_metadata["DetectionsDynamoDBName"]
# EMR Serverless monitoring: managed persistence plus S3 logs for scene detection.
CONFIGURATION_OVERRIDES = {
    "monitoringConfiguration": {
        "managedPersistenceMonitoringConfiguration": {"enabled": True},
        "s3MonitoringConfiguration": {"logUri": f"s3://{LOGS_BUCKET}/scene-detection"},
    }
}
ValueType = TypeVar("ValueType")
TASK_DEF_XCOM_KEY = "job_definition_arn"
DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}
logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
def get_config() -> botocore.config.Config:
    """Build a botocore client config.

    When both solution id and version are configured, the user agent is
    tagged so AWS can attribute API calls to this solution.
    """
    config_kwargs = {}
    if SOLUTION_ID and SOLUTION_VERSION:
        config_kwargs["user_agent_extra"] = f"AWSSOLUTION/{SOLUTION_ID}/{SOLUTION_VERSION}"
    return botocore.config.Config(**config_kwargs)
def try_create_aws_conn():
    """Ensure the ``aws_default`` Airflow connection exists.

    If the connection is missing (AwsHook raises AirflowException), create
    one that assumes ``DAG_ROLE`` and persist it in the Airflow metadata DB.
    """
    conn_id = "aws_default"
    try:
        AwsHook.get_connection(conn_id)
    except AirflowException:
        extra = json.dumps({"role_arn": DAG_ROLE}, indent=2)
        conn = Connection(conn_id=conn_id, conn_type="aws", host="", schema="", login="", extra=extra)
        # BUG FIX: create the session before entering try/finally — previously
        # a failing Session() call would make the finally block close an
        # unbound name, masking the real error with a NameError.
        session = settings.Session()
        try:
            session.add(conn)
            session.commit()
        finally:
            session.close()
def validate_config(drives_to_process):
    """Validate the ``drives_to_process`` DAG-run configuration.

    Expects a mapping of drive id -> {"bucket": ..., "prefix": ...} where
    each prefix ends with "/". Raises AssertionError with a descriptive
    message on malformed input.
    """
    example_input = {
        "drives_to_process": {
            "drive2": {
                "bucket": "addf-ros-image-demo-raw-bucket-d2be7d29",
                "prefix": "rosbag-scene-detection/drive2/",
            },
        }
    }
    for drive_id, drive in drives_to_process.items():
        assert isinstance(drive_id, str), f"expecting config to be like {example_input}, received: {drives_to_process}"
        assert (
            "bucket" in drive.keys() and "prefix" in drive.keys()
        ), f"expecting config to be like {example_input}, received: {drives_to_process}"
        # BUG FIX: endswith() also rejects an empty prefix with a clear
        # AssertionError; the previous ``prefix[-1]`` check raised IndexError.
        assert drive["prefix"].endswith("/"), f"expecting 'prefix' to end with '/', received: {drive['prefix']}"
def create_batch_of_drives(ti, **kwargs):
    """Create (or reuse) a batch of drive files keyed by the DAG run id.

    If the batch id already exists in the tracking table, the same files are
    reused so a rerun processes the exact same batch. Otherwise drive folders
    are listed from S3 and their recording files are written to DynamoDB and
    assigned to this batch (hard limit of 10000 files, the AWS Batch array
    size maximum).

    Returns the number of files in the batch (used downstream as the Batch
    array size via XCom).
    """
    drives_to_process = kwargs["dag_run"].conf["drives_to_process"]
    batch_id = kwargs["dag_run"].run_id
    # Establish AWS API Connections by assuming the DAG role; clients use the
    # temporary credentials rather than the worker's default identity.
    sts_client = boto3.client("sts")
    assumed_role_object = sts_client.assume_role(RoleArn=DAG_ROLE, RoleSessionName="AssumeRoleSession1")
    credentials = assumed_role_object["Credentials"]
    dynamodb = boto3.resource(
        "dynamodb",
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
        config=get_config(),
    )
    s3_client = boto3.client(
        "s3",
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
        config=get_config(),
    )
    # Validate Config before touching any AWS resources.
    validate_config(drives_to_process)
    table = dynamodb.Table(DYNAMODB_TABLE)
    # COUNT query: cheap existence check for this batch id.
    files_in_batch = table.query(
        KeyConditionExpression=Key("pk").eq(batch_id),
        Select="COUNT",
    )["Count"]
    if files_in_batch > 0:
        logger.info("Batch Id already exists in tracking table - using existing batch")
        return files_in_batch
    logger.info("New Batch Id - collecting unprocessed drives from S3 and adding to the batch")
    files_in_batch = add_drives_to_batch(
        table=table,
        drives_to_process=drives_to_process,
        batch_id=batch_id,
        file_suffix=FILE_SUFFIX,
        s3_client=s3_client,
    )
    assert files_in_batch <= 10000, "AWS Batch Array Size cannot exceed 10000"
    return files_in_batch
def get_job_name(suffix="") -> str:
    """Return a pipeline job name with a random 6-letter lowercase tail."""
    tail = "".join(random.choices(string.ascii_lowercase, k=6))
    return f"ros-image-pipeline-{suffix}-{tail}"
def png_batch_operation(**kwargs):
    """Submit the PNG image-extraction AWS Batch array job.

    The array size equals the file count produced by the
    ``create-batch-of-drives`` task (pulled via XCom); each array child
    resolves its own file from the DynamoDB tracking table via BATCH_ID
    plus its array index.
    """
    logger.info(f"kwargs at png_batch_operations is {kwargs}")
    ti = kwargs["ti"]
    array_size = ti.xcom_pull(task_ids="create-batch-of-drives", key="return_value")
    batch_id = kwargs["dag_run"].run_id
    context = get_current_context()
    op = BatchOperator(
        task_id="submit_batch_job_op",
        job_name=get_job_name("png"),
        job_queue=ON_DEMAND_JOB_QUEUE_ARN,
        aws_conn_id="aws_default",
        job_definition=PNG_JOB_DEFINITION_ARN,
        array_properties={"size": int(array_size)},
        # Container environment consumed by the extraction image.
        overrides={
            "environment": [
                {"name": "TABLE_NAME", "value": DYNAMODB_TABLE},
                {"name": "BATCH_ID", "value": batch_id},
                {"name": "DEBUG", "value": "true"},
                {"name": "IMAGE_TOPICS", "value": json.dumps(IMAGE_TOPICS)},
                {"name": "DESIRED_ENCODING", "value": DESIRED_ENCODING},
                {"name": "TARGET_BUCKET", "value": TARGET_BUCKET},
            ],
        },
    )
    # Execute synchronously inside this PythonOperator rather than as a
    # standalone task, so the array size can be computed at runtime.
    op.execute(context)
def parquet_operation(**kwargs):
    """Submit the sensor-topic parquet-extraction AWS Batch array job.

    Mirrors ``png_batch_operation`` but runs on the Fargate queue and
    extracts the configured sensor topics to parquet instead of images.
    """
    ti = kwargs["ti"]
    array_size = ti.xcom_pull(task_ids="create-batch-of-drives", key="return_value")
    batch_id = kwargs["dag_run"].run_id
    context = get_current_context()
    op = BatchOperator(
        task_id="submit_parquet_job_op",
        job_name=get_job_name("parq"),
        job_queue=FARGATE_JOB_QUEUE_ARN,
        aws_conn_id="aws_default",
        job_definition=PARQUET_JOB_DEFINITION_ARN,
        array_properties={"size": int(array_size)},
        # Container environment consumed by the extraction image.
        overrides={
            "environment": [
                {"name": "TABLE_NAME", "value": DYNAMODB_TABLE},
                {"name": "BATCH_ID", "value": batch_id},
                {"name": "TOPICS", "value": json.dumps(SENSOR_TOPICS)},
                {"name": "TARGET_BUCKET", "value": TARGET_BUCKET},
            ],
        },
    )
    op.execute(context)
def sagemaker_yolo_operation(**kwargs):
    """Run YOLO object detection over all resized image directories.

    Reads the ``resized_image_dirs`` attribute for every item in this batch
    from DynamoDB, then launches one SageMaker Processing job per directory,
    at most YOLO_CONCURRENCY jobs in flight at a time. Outputs land next to
    the inputs under an ``_post_obj_dets`` suffix.
    """
    # Establish AWS API Connections using the assumed DAG role.
    sts_client = boto3.client("sts")
    assumed_role_object = sts_client.assume_role(RoleArn=DAG_ROLE, RoleSessionName="AssumeRoleSession1")
    credentials = assumed_role_object["Credentials"]
    dynamodb = boto3.resource(
        "dynamodb",
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
        config=get_config(),
    )
    table = dynamodb.Table(DYNAMODB_TABLE)
    batch_id = kwargs["dag_run"].run_id
    # Get Image Directories per Recording File to Label
    image_directory_items = table.query(
        KeyConditionExpression=Key("pk").eq(batch_id),
        Select="SPECIFIC_ATTRIBUTES",
        ProjectionExpression="resized_image_dirs",
    )["Items"]
    image_directories = []
    for item in image_directory_items:
        image_directories += item["resized_image_dirs"]
    logger.info(f"Starting object detection job for {len(image_directories)} directories")
    total_jobs = len(image_directories)
    # Process directories in waves of YOLO_CONCURRENCY async jobs.
    num_batches = ceil(total_jobs / YOLO_CONCURRENCY)
    for i in range(num_batches):
        logger.info(f"Starting object detection job for batch {i + 1} of {num_batches}")
        processor = Processor(
            image_uri=YOLO_IMAGE_URI,
            role=YOLO_ROLE,
            instance_count=1,
            instance_type=YOLO_INSTANCE_TYPE,
            # Job-name prefix is derived from the run id, trimmed/sanitized to
            # satisfy SageMaker naming constraints.
            base_job_name=f"{batch_id.replace(':', '').replace('_', '')[0:23]}-YOLO",
            network_config=NetworkConfig(subnets=PRIVATE_SUBNETS_IDS, security_group_ids=[SM_SECURITY_GROUP_ID]),
        )
        idx_start = i * YOLO_CONCURRENCY
        idx_end = (i + 1) * YOLO_CONCURRENCY
        for image_directory in image_directories[idx_start:idx_end]:
            logger.info(f"Starting object detection job for {image_directory}")
            logger.info(
                "Job details available at: "
                f"https://{REGION}.console.aws.amazon.com/sagemaker/home?region={REGION}#/processing-jobs"
            )
            # wait=False: fire off the whole wave, then wait on it below.
            processor.run(
                inputs=[
                    ProcessingInput(
                        input_name="data",
                        source=f"s3://{TARGET_BUCKET}/{image_directory}/",
                        destination="/opt/ml/processing/input/",
                    )
                ],
                outputs=[
                    ProcessingOutput(
                        output_name="output",
                        source="/opt/ml/processing/output/",
                        destination=f"s3://{TARGET_BUCKET}/{image_directory}_post_obj_dets/",
                    )
                ],
                arguments=["--model", YOLO_MODEL],
                wait=False,
                logs=False,
            )
            time.sleep(1)  # Attempt at avoiding throttling exceptions
        logger.info("Waiting on batch of jobs to finish")
        for job in processor.jobs:
            logger.info(f"Waiting on: {job} - logs from job:")
            job.wait(logs=False)
    logger.info("All object detection jobs complete")
def sagemaker_lanedet_operation(**kwargs):
    """Run lane detection over all resized image directories.

    Reads the ``resized_image_dirs`` attribute for every item in this batch
    from DynamoDB, then launches one SageMaker Processing job per directory,
    at most LANEDET_CONCURRENCY jobs in flight at a time. Annotated images,
    JSON, and CSV outputs land next to the inputs under an
    ``_post_lane_dets`` suffix.
    """
    # Establish AWS API Connections using the assumed DAG role.
    sts_client = boto3.client("sts")
    assumed_role_object = sts_client.assume_role(RoleArn=DAG_ROLE, RoleSessionName="AssumeRoleSession1")
    credentials = assumed_role_object["Credentials"]
    dynamodb = boto3.resource(
        "dynamodb",
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
        config=get_config(),
    )
    table = dynamodb.Table(DYNAMODB_TABLE)
    batch_id = kwargs["dag_run"].run_id
    # Get Image Directories per Recording File to Label
    image_directory_items = table.query(
        KeyConditionExpression=Key("pk").eq(batch_id),
        Select="SPECIFIC_ATTRIBUTES",
        ProjectionExpression="resized_image_dirs",
    )["Items"]
    image_directories = []
    for item in image_directory_items:
        image_directories += item["resized_image_dirs"]
    logger.info(f"Starting lane detection job for {len(image_directories)} directories")
    total_jobs = len(image_directories)
    # Process directories in waves of LANEDET_CONCURRENCY async jobs.
    num_batches = ceil(total_jobs / LANEDET_CONCURRENCY)
    for i in range(num_batches):
        logger.info(f"Starting lane detection job for batch {i + 1} of {num_batches}")
        processor = Processor(
            image_uri=LANEDET_IMAGE_URI,
            role=LANEDET_ROLE,
            instance_count=1,
            instance_type=LANEDET_INSTANCE_TYPE,
            base_job_name=f"{batch_id.replace(':', '').replace('_', '')[0:23]}-LANE",
            network_config=NetworkConfig(subnets=PRIVATE_SUBNETS_IDS, security_group_ids=[SM_SECURITY_GROUP_ID]),
        )
        # Container-local paths mapped to S3 by ProcessingInput/Output below.
        LOCAL_INPUT = "/opt/ml/processing/input/image"
        LOCAL_OUTPUT = "/opt/ml/processing/output/image"
        LOCAL_OUTPUT_JSON = "/opt/ml/processing/output/json"
        LOCAL_OUTPUT_CSV = "/opt/ml/processing/output/csv"
        idx_start = i * LANEDET_CONCURRENCY
        idx_end = (i + 1) * LANEDET_CONCURRENCY
        for image_directory in image_directories[idx_start:idx_end]:
            logger.info(f"Starting lane detection job for {image_directory}")
            logger.info(
                "Job details available at: "
                f"https://{REGION}.console.aws.amazon.com/sagemaker/home?region={REGION}#/processing-jobs"
            )
            # wait=False: fire off the whole wave, then wait on it below.
            processor.run(
                arguments=[
                    "--save_dir",
                    LOCAL_OUTPUT,
                    "--source",
                    LOCAL_INPUT,
                    "--json_path",
                    LOCAL_OUTPUT_JSON,
                    "--csv_path",
                    LOCAL_OUTPUT_CSV,
                ],
                inputs=[
                    ProcessingInput(
                        input_name="data",
                        source=f"s3://{TARGET_BUCKET}/{image_directory}/",
                        destination=LOCAL_INPUT,
                    )
                ],
                outputs=[
                    ProcessingOutput(
                        output_name="image_output",
                        source=LOCAL_OUTPUT,
                        destination=f"s3://{TARGET_BUCKET}/{image_directory}_post_lane_dets/",
                    ),
                    ProcessingOutput(
                        output_name="json_output",
                        source=LOCAL_OUTPUT_JSON,
                        destination=f"s3://{TARGET_BUCKET}/{image_directory}_post_lane_dets/",
                    ),
                    ProcessingOutput(
                        output_name="csv_output",
                        source=LOCAL_OUTPUT_CSV,
                        destination=f"s3://{TARGET_BUCKET}/{image_directory}_post_lane_dets/",
                    ),
                ],
                wait=False,
                logs=False,
            )
            time.sleep(1)  # Attempt at avoiding throttling exceptions
        logger.info("Waiting on batch of jobs to finish")
        for job in processor.jobs:
            logger.info(f"Waiting on: {job} - logs from job:")
            job.wait(logs=False)
    # BUG FIX: this message previously said "object detection" — copy-paste
    # from sagemaker_yolo_operation.
    logger.info("All lane detection jobs complete")
def emr_batch_operation(**kwargs):
    """Start the EMR Serverless scene-detection Spark job for this batch.

    Returns the EMR job run id, which the downstream
    ``EmrServerlessJobSensor`` reads from XCom to poll for completion.
    """
    ds = kwargs["ds"]
    batch_id = kwargs["dag_run"].run_id
    JOB_DRIVER = {
        "sparkSubmit": {
            "entryPoint": f"{S3_SCRIPT_DIR}detect_scenes.py",
            "entryPointArguments": [
                "--batch-metadata-table-name",
                DYNAMODB_TABLE,
                "--batch-id",
                batch_id,
                "--output-bucket",
                TARGET_BUCKET,
                "--region",
                REGION,
                "--output-dynamo-table",
                SCENE_TABLE,
                "--image-topics",
                json.dumps(IMAGE_TOPICS),
            ],
            # Spark connector jar for writing results to DynamoDB.
            "sparkSubmitParameters": f"--jars {S3_SCRIPT_DIR}spark-dynamodb_2.12-1.1.1.jar",
        }
    }
    start_job_run_op = EmrServerlessStartJobOperator(
        task_id="scene_detection",
        application_id=EMR_APPLICATION_ID,
        execution_role_arn=EMR_JOB_EXECUTION_ROLE,
        job_driver=JOB_DRIVER,
        configuration_overrides=CONFIGURATION_OVERRIDES,
        aws_conn_id="aws_default",
    )
    # NOTE(review): ``ds`` (the run's date string) is passed where Airflow
    # operators conventionally expect the task context dict — confirm the
    # operator's execute() tolerates this.
    job_run_id = start_job_run_op.execute(ds)
    return job_run_id
# DAG wiring: conn setup -> batch creation -> extraction -> labelling -> scene detection.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
    render_template_as_native_obj=True,
) as dag:
    create_aws_conn = PythonOperator(
        task_id="try-create-aws-conn",
        python_callable=try_create_aws_conn,
        dag=dag,
    )
    create_batch_of_drives_task = PythonOperator(
        task_id="create-batch-of-drives",
        python_callable=create_batch_of_drives,
        dag=dag,
        provide_context=True,
    )
    # Start Task Group definition
    with TaskGroup(group_id="sensor-extraction") as extract_task_group:
        submit_png_job = PythonOperator(task_id="image-extraction-batch-job", python_callable=png_batch_operation)
        submit_parquet_job = PythonOperator(task_id="parquet-extraction-batch-job", python_callable=parquet_operation)
        create_batch_of_drives_task >> [submit_parquet_job, submit_png_job]
    # Object- and lane-detection run in parallel within this group.
    with TaskGroup(group_id="image-labelling") as image_labelling_task_group:
        submit_yolo_job = PythonOperator(
            task_id="object-detection-sagemaker-job",
            python_callable=sagemaker_yolo_operation,
        )
        submit_lane_det_job = PythonOperator(
            task_id="lane-detection-sagemaker-job",
            python_callable=sagemaker_lanedet_operation,
        )
    with TaskGroup(group_id="scene-detection") as scene_detection_task_group:
        start_job_run = PythonOperator(task_id="scene-detection", python_callable=emr_batch_operation)
        # Sensor polls the EMR job run id returned by emr_batch_operation via XCom.
        job_sensor = EmrServerlessJobSensor(
            task_id="check-emr-job-status",
            application_id=EMR_APPLICATION_ID,
            job_run_id="{{ task_instance.xcom_pull(task_ids='scene-detection.scene-detection', key='return_value') }}",
            aws_conn_id="aws_default",
        )
        start_job_run >> job_sensor
    create_aws_conn >> create_batch_of_drives_task >> extract_task_group
    # Labelling depends on extracted PNGs; scene detection on labelling.
    submit_png_job >> image_labelling_task_group >> scene_detection_task_group
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline/image_dags/__init__.py | modules/analysis/rosbag-image-pipeline/image_dags/__init__.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/optionals/datalake-buckets/stack.py | modules/optionals/datalake-buckets/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import hashlib
import logging
from typing import Any, cast
import aws_cdk
import aws_cdk.aws_iam as aws_iam
import aws_cdk.aws_s3 as aws_s3
import cdk_nag
from aws_cdk import Aspects, Duration, Stack, Tags
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class DataLakeBucketsStack(Stack):
    """Datalake storage stack.

    Creates the access-logs bucket plus the raw / intermediate / curated /
    artifacts datalake buckets, and two managed IAM policies (read-only and
    full-access) scoped to the four data buckets.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        hash: str,
        buckets_encryption_type: str,
        buckets_retention: str,
        artifacts_log_retention: int,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Build the stack.

        :param buckets_encryption_type: "KMS" for KMS-managed keys, anything
            else (e.g. "SSE") for S3-managed encryption on the data buckets.
        :param buckets_retention: "RETAIN" keeps buckets/objects on stack
            deletion; any other value destroys them.
        :param artifacts_log_retention: days before MWAA artifact access logs
            expire from the logs bucket.
        """
        # CDK Env Vars
        account: str = aws_cdk.Aws.ACCOUNT_ID
        region: str = aws_cdk.Aws.REGION
        partition: str = aws_cdk.Aws.PARTITION

        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        # used to tag AWS resources. Tag Value length cant exceed 256 characters
        full_dep_mod = dep_mod[:256]
        super().__init__(scope, id, description=stack_description, **kwargs)
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=full_dep_mod)

        # Shared bucket settings derived once from the constructor arguments.
        retain = buckets_retention.upper() == "RETAIN"
        removal_policy = aws_cdk.RemovalPolicy.RETAIN if retain else aws_cdk.RemovalPolicy.DESTROY
        auto_delete = None if retain else True
        data_encryption = (
            aws_s3.BucketEncryption.KMS_MANAGED
            if buckets_encryption_type.upper() == "KMS"
            else aws_s3.BucketEncryption.S3_MANAGED
        )

        def unique_suffix(bucket_name: str) -> str:
            # Deterministic per-module suffix that keeps the final name unique
            # while staying within the 63-character S3 bucket-name limit.
            return hashlib.sha256(module_name.encode("utf-8")).hexdigest()[: (60 - len(bucket_name))]

        logs_bucket_name = f"{project_name}-{deployment_name}-logs-bucket-{hash}"
        logs_bucket = aws_s3.Bucket(
            self,
            id="logs-bucket",
            bucket_name=f"{logs_bucket_name}-{unique_suffix(logs_bucket_name)}",
            removal_policy=removal_policy,
            auto_delete_objects=auto_delete,
            # Encryption should be always set to AES256 for a bucket to receive access logging from target buckets
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            object_ownership=aws_s3.ObjectOwnership.BUCKET_OWNER_PREFERRED,
            enforce_ssl=True,
            # MWAA is very chatty, logs need to be cleaned via LifecycleRule
            lifecycle_rules=[
                aws_s3.LifecycleRule(
                    expiration=Duration.days(artifacts_log_retention),
                    enabled=True,
                    prefix="artifacts-bucket-logs/",
                )
            ],
        )

        def datalake_bucket(construct_id: str) -> aws_s3.Bucket:
            # One of the four standard data buckets; access logs flow to the
            # logs bucket under "<construct_id>-logs/".
            name = f"{project_name}-{deployment_name}-{construct_id}-{hash}"
            return aws_s3.Bucket(
                self,
                id=construct_id,
                bucket_name=f"{name}-{unique_suffix(name)}",
                versioned=True,
                removal_policy=removal_policy,
                auto_delete_objects=auto_delete,
                encryption=data_encryption,
                server_access_logs_bucket=logs_bucket,
                server_access_logs_prefix=f"{construct_id}-logs/",
                block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
                enforce_ssl=True,
            )

        raw_bucket = datalake_bucket("raw-bucket")
        intermediate_bucket = datalake_bucket("intermediate-bucket")
        curated_bucket = datalake_bucket("curated-bucket")
        artifacts_bucket = datalake_bucket("artifacts-bucket")

        # ARN list (bucket + objects) shared by the read and write statements.
        bucket_resources: list = []
        for bucket in (raw_bucket, intermediate_bucket, curated_bucket, artifacts_bucket):
            bucket_resources += [f"{bucket.bucket_arn}/*", bucket.bucket_arn]

        def kms_statement() -> aws_iam.PolicyStatement:
            # KMS usage permissions needed when buckets use KMS-managed keys.
            return aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=[
                    "kms:Decrypt",
                    "kms:Encrypt",
                    "kms:ReEncrypt*",
                    "kms:DescribeKey",
                    "kms:GenerateDataKey",
                ],
                resources=[f"arn:{partition}:kms::{account}:*"],
            )

        def read_statement() -> aws_iam.PolicyStatement:
            return aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=[
                    "s3:GetObject",
                    "s3:GetObjectAcl",
                    "s3:ListBucket",
                ],
                resources=bucket_resources,
            )

        # ReadOnly IAM Policy
        readonly_policy = aws_iam.ManagedPolicy(
            self,
            id="readonly_policy",
            managed_policy_name=f"{project_name}-{deployment_name}-{module_name}-{region}-{account}-readonly-access",
            statements=[kms_statement(), read_statement()],
        )

        # FullAccess IAM Policy: read permissions plus object writes.
        fullaccess_policy = aws_iam.ManagedPolicy(
            self,
            id="fullaccess_policy",
            managed_policy_name=f"{project_name}-{deployment_name}-{module_name}-{region}-{account}-full-access",
            statements=[
                kms_statement(),
                read_statement(),
                aws_iam.PolicyStatement(
                    actions=["s3:PutObject", "s3:PutObjectAcl"],
                    resources=bucket_resources,
                ),
            ],
        )

        # Exposed for app.py to publish in the module metadata output.
        self.raw_bucket = raw_bucket
        self.intermediate_bucket = intermediate_bucket
        self.curated_bucket = curated_bucket
        self.artifacts_bucket = artifacts_bucket
        self.logs_bucket = logs_bucket
        self.readonly_policy = readonly_policy
        self.fullaccess_policy = fullaccess_policy

        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            [
                NagPackSuppression(
                    id="AwsSolutions-S1",
                    reason="Logging has been disabled for demo purposes",
                ),
                NagPackSuppression(
                    id="AwsSolutions-IAM5",
                    reason="Resource access restriced for demo resources",
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/optionals/datalake-buckets/app.py | modules/optionals/datalake-buckets/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import DataLakeBucketsStack
# Project vars
# Seedfarmer-provided deployment identity; empty string when unset.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
# NOTE: shadows the ``hash`` builtin at module scope.
hash = os.getenv("SEEDFARMER_HASH", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# App Env vars
buckets_encryption_type = os.getenv(_param("ENCRYPTION_TYPE"), "SSE")
buckets_retention = os.getenv(_param("RETENTION_TYPE"), "DESTROY")
artifact_logs_retention = os.getenv(_param("ARTIFACTS_LOG_RETENTION"), "1")
# Fail fast on unsupported values before any CDK synthesis happens.
if buckets_retention not in ["DESTROY", "RETAIN"]:
    raise ValueError("The only RETENTION_TYPE values accepted are 'DESTROY' and 'RETAIN' ")
if buckets_encryption_type not in ["SSE", "KMS"]:
    raise ValueError("The only ENCRYPTION_TYPE values accepted are 'SSE' and 'KMS' ")
def generate_description() -> str:
    """Compose the CloudFormation stack description.

    Prefers the AWS solution id/name (and version, when present); otherwise
    falls back to a generic project-based description.
    """
    soln_id = os.getenv(_param("SOLUTION_ID"), None)
    soln_name = os.getenv(_param("SOLUTION_NAME"), None)
    soln_version = os.getenv(_param("SOLUTION_VERSION"), None)
    if soln_id and soln_name and soln_version:
        return f"({soln_id}) {soln_name}. Version {soln_version}"
    if soln_id and soln_name:
        return f"({soln_id}) {soln_name}"
    return f"{project_name} - Datalake Buckets Module"
app = App()
stack = DataLakeBucketsStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    hash=hash,
    buckets_encryption_type=buckets_encryption_type,
    buckets_retention=buckets_retention,
    artifacts_log_retention=int(artifact_logs_retention),
    stack_description=generate_description(),
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Publish resource names/ARNs as the module's "metadata" output so other
# seedfarmer modules can consume them.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "ArtifactsBucketName": stack.artifacts_bucket.bucket_name,
            "LogsBucketName": stack.logs_bucket.bucket_name,
            "RawBucketName": stack.raw_bucket.bucket_name,
            "IntermediateBucketName": stack.intermediate_bucket.bucket_name,
            "CuratedBucketName": stack.curated_bucket.bucket_name,
            "ReadOnlyPolicyArn": stack.readonly_policy.managed_policy_arn,
            "FullAccessPolicyArn": stack.fullaccess_policy.managed_policy_arn,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/optionals/datalake-buckets/tests/test_app.py | modules/optionals/datalake-buckets/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the environment variables app.py expects and drop any cached import."""
    os.environ.update(
        {
            "SEEDFARMER_PROJECT_NAME": "test-project",
            "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
            "SEEDFARMER_MODULE_NAME": "test-module",
            "SEEDFARMER_HASH": "hash",
            "CDK_DEFAULT_ACCOUNT": "111111111111",
            "CDK_DEFAULT_REGION": "us-east-1",
            "SEEDFARMER_PARAMETER_ENCRYPTION_TYPE": "SSE",
            "SEEDFARMER_PARAMETER_RETENTION_TYPE": "DESTROY",
        }
    )
    # Force a fresh `import app` in each test.
    sys.modules.pop("app", None)
def test_app(stack_defaults):
    # Smoke test: importing app synthesizes the stack without raising.
    import app  # noqa: F401
def test_buckets_encryption_type(stack_defaults):
    # NOTE(review): after deleting the variable, app.py falls back to the
    # "SSE" default, and the final assert re-reads a key that was just
    # deleted — confirm this test exercises the intended failure path.
    del os.environ["SEEDFARMER_PARAMETER_ENCRYPTION_TYPE"]
    with pytest.raises(Exception):
        import app  # noqa: F401

        assert os.environ["SEEDFARMER_PARAMETER_ENCRYPTION_TYPE"] == "SSE"
def test_invalid_buckets_encryption_type(stack_defaults):
    # An unsupported ENCRYPTION_TYPE must make the app import fail fast.
    os.environ["SEEDFARMER_PARAMETER_ENCRYPTION_TYPE"] = "notvalid"
    with pytest.raises(Exception):
        import app  # noqa: F401
def test_buckets_retention(stack_defaults):
    # NOTE(review): same pattern as test_buckets_encryption_type — app.py
    # defaults RETENTION_TYPE to "DESTROY" when unset, and the final assert
    # re-reads a deleted key; confirm the intended behavior.
    del os.environ["SEEDFARMER_PARAMETER_RETENTION_TYPE"]
    with pytest.raises(Exception):
        import app  # noqa: F401

        assert os.environ["SEEDFARMER_PARAMETER_RETENTION_TYPE"] == "DESTROY"
def test_invalid_retention_type(stack_defaults):
    # An unsupported RETENTION_TYPE must make the app import fail fast.
    os.environ["SEEDFARMER_PARAMETER_RETENTION_TYPE"] = "notvalid"
    with pytest.raises(Exception):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    # With id, name, and version all set, the full description is produced.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """generate_description omits the version suffix when it is not set."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # BUG FIX: pop() keeps this test independent of execution order; the
    # previous ``del`` raised KeyError whenever the version variable had not
    # been set by an earlier test.
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/optionals/datalake-buckets/tests/test_stack.py | modules/optionals/datalake-buckets/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Pin the CDK account/region and drop any cached `stack` module."""
    os.environ.update(
        {
            "CDK_DEFAULT_ACCOUNT": "111111111111",
            "CDK_DEFAULT_REGION": "us-east-1",
        }
    )
    # Force a fresh `import stack` in each test.
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults):
    """The stack synthesizes and defines all five datalake buckets."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"

    buckets_stack = stack.DataLakeBucketsStack(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        hash="hash",
        buckets_encryption_type="SSE",
        buckets_retention="DESTROY",
        artifacts_log_retention=2,
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )

    template = Template.from_stack(buckets_stack)
    # logs + raw + intermediate + curated + artifacts
    template.resource_count_is("AWS::S3::Bucket", 5)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/optionals/datalake-buckets/tests/__init__.py | modules/optionals/datalake-buckets/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/stack.py | modules/examples/example-spark-dags/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class DagIamRole(Stack):
    """CDK stack that creates the IAM role MWAA DAGs assume to run the example
    Spark (EMR on EKS) citibike workload."""

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        mwaa_exec_role: str,
        raw_bucket_name: str,
        **kwargs: Any,
    ) -> None:
        """Create the DAG role.

        Args:
            scope: Parent construct.
            id: Construct/stack identifier.
            project_name: Seed-Farmer project name (used in role naming/tagging).
            deployment_name: Seed-Farmer deployment name (used in role naming/tagging).
            module_name: Seed-Farmer module name (used in role naming).
            mwaa_exec_role: ARN of the MWAA execution role allowed to assume this role.
            raw_bucket_name: Name of the raw-data S3 bucket the DAGs read/write.
            **kwargs: Forwarded to aws_cdk.Stack.
        """
        # Env vars
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        self.mwaa_exec_role = mwaa_exec_role
        super().__init__(
            scope,
            id,
            description="This stack deploys Example Spark DAGs resources",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")
        # The below permissions is to deploy the `citibike` usecase declared in the below blogpost
        # https://aws.amazon.com/blogs/big-data/manage-and-process-your-big-data-workflows-with-amazon-mwaa-and-amazon-emr-on-amazon-eks/
        policy_statements = [
            # Read-only access to the public `tripdata` source bucket.
            iam.PolicyStatement(
                actions=["s3:ListBucket", "s3:GetObject*"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:s3:::tripdata", "arn:aws:s3:::tripdata/*"],
            ),
            # Full access to the project's raw bucket (DAGs write extracted CSVs here).
            iam.PolicyStatement(
                actions=["s3:*"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{raw_bucket_name}",
                    f"arn:{self.partition}:s3:::{raw_bucket_name}/*",
                ],
            ),
            # EMR-on-EKS job lifecycle control; EMR containers does not support
            # resource-level scoping here, hence "*".
            iam.PolicyStatement(
                actions=[
                    "emr-containers:StartJobRun",
                    "emr-containers:ListJobRuns",
                    "emr-containers:DescribeJobRun",
                    "emr-containers:CancelJobRun",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ),
            # NOTE(review): this grants decrypt on every KMS key in the account —
            # confirm whether it can be narrowed to the buckets' keys.
            iam.PolicyStatement(
                actions=["kms:Decrypt", "kms:GenerateDataKey"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:kms:{self.region}:{self.account}:key/*"],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)
        r_name = f"{self.project_name}-{self.deployment_name}-{self.module_name}-dag-role"
        # Only the MWAA execution role may assume this DAG role.
        self.dag_role = iam.Role(
            self,
            f"dag-role-{self.deployment_name}-{self.module_name}",
            assumed_by=iam.ArnPrincipal(self.mwaa_exec_role),
            inline_policies={"DagPolicyDocument": dag_document},
            role_name=r_name,
            path="/",
        )
        # Run cdk-nag checks and suppress the wildcard-resource finding explained above.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            [
                {  # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced describe only",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/app.py | modules/examples/example-spark-dags/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import DagIamRole
# Seed-Farmer injects these values via environment variables at deploy time.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
mwaa_exec_role = os.getenv("SEEDFARMER_PARAMETER_MWAA_EXEC_ROLE_ARN", "")
raw_bucket_name = os.getenv("SEEDFARMER_PARAMETER_RAW_BUCKET_NAME", "")
app = App()
stack = DagIamRole(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    mwaa_exec_role=mwaa_exec_role,
    raw_bucket_name=raw_bucket_name,
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Publish the created role ARN as module metadata for downstream modules.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string({"DagRoleArn": stack.dag_role.role_arn}),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/tests/test_stack.py | modules/examples/example-spark-dags/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/tests/__init__.py | modules/examples/example-spark-dags/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/example_spark_dags/citibike-spark-all.py | modules/examples/example-spark-dags/example_spark_dags/citibike-spark-all.py | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from pyspark.sql import SparkSession
from pyspark.sql.types import (
DateType,
DoubleType,
IntegerType,
StringType,
StructField,
StructType,
)
# Usage: citibike-spark-all.py <bucket>. The bucket holds raw trip CSVs under
# citibike/csv/ and receives parquet output plus CSV result sets under
# citibike/results/.
bucket = sys.argv[1]
spark = SparkSession.builder.appName("citibike").getOrCreate()
# Explicit schema for the raw citibike trip CSVs (headers are skipped on read).
schema = StructType(
    [
        StructField("tripduration", IntegerType(), True),
        StructField("starttime", DateType(), True),
        StructField("stoptime", DateType(), True),
        StructField("start_station_id", IntegerType(), True),
        StructField("start_station_name", StringType(), True),
        StructField("start_station_lat", DoubleType(), True),
        StructField("start_station_lon", DoubleType(), True),
        StructField("end_station_id", IntegerType(), True),
        StructField("end_station_name", StringType(), True),
        StructField("end_station_lat", DoubleType(), True),
        StructField("end_station_lon", DoubleType(), True),
        StructField("bike_id", IntegerType(), True),
        StructField("usertype", StringType(), True),
        StructField("birthyear", IntegerType(), True),
        StructField("gender", IntegerType(), True),
    ]
)
# Stage 1: convert the raw CSVs to parquet.
df = spark.read.format("csv").option("header", "true").schema(schema).load("s3://" + bucket + "/citibike/csv/")
df.write.parquet("s3://" + bucket + "/citibike/parquet/", mode="overwrite")
####################################### Stage 2: analytics over the parquet data
df = spark.read.format("parquet").load("s3://" + bucket + "/citibike/parquet/")
df.createOrReplaceTempView("citibike")
# Derive year (yr) and month (mo) columns from the trip start time.
newdf = spark.sql("select *, extract(year from starttime) as yr,extract( month from starttime) as mo from citibike")
newdf.createOrReplaceTempView("newcitibike")
# Rides per month.
output = spark.sql("select count(*) as sum, mo from newcitibike group by mo order by sum desc")
output.coalesce(1).write.format("csv").option("header", "true").save(
    "s3://" + bucket + "/citibike/results/ridership/", mode="overwrite"
)
# Most popular start stations per month.
output = spark.sql(
    "select start_station_name,count(start_station_name) as counts, mo from newcitibike group by start_station_name,mo order by counts desc"
)
output.coalesce(1).write.format("csv").option("header", "true").save(
    "s3://" + bucket + "/citibike/results/popular_start_stations/", mode="overwrite"
)
# Most popular end stations per month.
output = spark.sql(
    "select end_station_name,count(end_station_name) as counts, mo from newcitibike group by end_station_name,mo order by counts desc"
)
output.coalesce(1).write.format("csv").option("header", "true").save(
    "s3://" + bucket + "/citibike/results/popular_end_stations/", mode="overwrite"
)
# Trips by rider age per month; age is computed against the hard-coded year 2021.
output = spark.sql(
    "select count(birthyear) as trips,mo as month,2021-birthyear as age from newcitibike group by age,month order by trips desc"
)
output.coalesce(1).write.format("csv").option("header", "true").save(
    "s3://" + bucket + "/citibike/results/trips_by_age/", mode="overwrite"
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/example_spark_dags/citibike_all_dag.py | modules/examples/example-spark-dags/example_spark_dags/citibike_all_dag.py | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import random
import string
import zipfile
from datetime import datetime, timedelta
from io import BytesIO
import boto3
from airflow import DAG, settings
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.providers.amazon.aws.operators.emr_containers import EMRContainerOperator
from airflow.providers.amazon.aws.sensors.emr_containers import EMRContainerSensor
from boto3.session import Session
from example_spark_dags import emr_eks_dag_config
afbucket = f"{emr_eks_dag_config.DAG_BUCKET}/dags/example_spark_dags/" # ADDF MWAA Dags bucket
emr_virtual_cluster_id = emr_eks_dag_config.VIRTUAL_CLUSTER_ID
emr_execution_role_arn = emr_eks_dag_config.EMR_JOB_EXECUTION_ROLE
YR = "2020"
bucket = emr_eks_dag_config.RAW_BUCKET # ADDF RAW bucket
SRC_BUCKET = "tripdata"
SRC_KEY = "-citibike-tripdata.csv.zip"
DEST_BUCKET = emr_eks_dag_config.RAW_BUCKET
DEST_KEY = "-citibike-tripdata.csv"
now = datetime.now()
def try_create_aws_conn(**kwargs):
    """Create the 'aws_emr_on_eks' Airflow connection if it does not already exist.

    The connection's `extra` carries the DAG role ARN so AWS operators assume it.
    """
    conn_id = "aws_emr_on_eks"
    try:
        AwsHook.get_connection(conn_id)
    except AirflowException:
        # Connection is missing: register one pointing at the DAG role.
        extra = json.dumps({"role_arn": emr_eks_dag_config.DAG_ROLE}, indent=2)
        conn = Connection(conn_id=conn_id, conn_type="aws", host="", schema="", login="", extra=extra)
        # BUG FIX: the session must be created *before* the try block. If
        # settings.Session() raised inside the try, the finally clause would
        # reference an unbound `session` and raise NameError, masking the
        # original error.
        session = settings.Session()
        try:
            session.add(conn)
            session.commit()
        finally:
            session.close()
def get_assumerole_creds():
    """Assume the configured DAG role via STS and return a boto3 Session using
    the temporary credentials."""
    assumed = boto3.client("sts").assume_role(
        RoleArn=emr_eks_dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession",
    )
    creds = assumed["Credentials"]
    return Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
def find_max_month():
    """Count the non-Jersey-City ("JC") trip archives for year YR in the source
    bucket; used as the number of months of data to process."""
    s3 = get_assumerole_creds().client("s3")
    # NOTE(review): list_objects returns at most 1000 keys per call; this
    # assumes the bucket listing fits in a single page — verify if it grows.
    mo = sum(
        1
        for key in s3.list_objects(Bucket=SRC_BUCKET)["Contents"]
        if "JC" not in key["Key"] and YR in key["Key"]
    )
    print("returning max month {}".format(mo))
    return mo
def copy_and_unzip_s3(**context):
    """Download a zip from context['bucket']/context['key'], extract it in
    memory, and upload each member (skipping '__'-prefixed metadata entries
    such as __MACOSX/) to context['destbucket']/context['destkey'].

    Per-member upload failures are logged and skipped (best-effort, preserving
    the original behavior).
    """
    session = get_assumerole_creds()
    s3_resource = session.resource("s3")
    zip_obj = s3_resource.Object(bucket_name=context["bucket"], key=context["key"])
    buffer = BytesIO(zip_obj.get()["Body"].read())
    # Context managers so the archive and each member stream are always closed.
    with zipfile.ZipFile(buffer) as z:
        print("downloaded zip {}, zipObj {}".format(z, zipfile))
        # NOTE(review): every member is uploaded to the same destkey, so the
        # last member wins — confirm the archives contain a single CSV.
        for filename in z.namelist():
            if filename.startswith("__"):
                continue
            file_info = z.getinfo(filename)
            print("interating over zip {}, zipObj {}".format(filename, file_info))
            try:
                # upload_fileobj returns None, so its result is not captured.
                with z.open(filename) as member:
                    s3_resource.meta.client.upload_fileobj(
                        member, Bucket=context["destbucket"], Key=context["destkey"]
                    )
                print("uploaded to s3 {}".format(filename))
            except Exception as e:
                print(e)
def list_bucket(**context):
    """Print every key containing 'csv.gz' in the bucket context['destbucket']."""
    s3 = get_assumerole_creds().client("s3")
    listing = s3.list_objects(Bucket=context["destbucket"])["Contents"]
    for obj in listing:
        name = obj["Key"]
        if "csv.gz" in name:
            print(name)
# Default task arguments shared by every task in the DAG below.
DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 5,
    "retry_delay": timedelta(minutes=1),
}
# [START howto_operator_emr_containers_start_job_run]
# Spark-submit driver for the EMR-on-EKS job; the script lives under the
# MWAA dags prefix and receives the raw bucket name as its only argument.
JOB_DRIVER = {
    "sparkSubmitJobDriver": {
        "entryPoint": "s3://" + afbucket + "citibike-spark-all.py",
        "entryPointArguments": [bucket],
        "sparkSubmitParameters": "--conf spark.executor.instances=3 --conf "
        "spark.executor.memory=4G --conf spark.driver.memory=2G --conf spark.executor.cores=2 "
        "--conf spark.sql.shuffle.partitions=60 --conf spark.dynamicAllocation.enabled=false",
    }
}
# CloudWatch + S3 log destinations for the EMR job run.
CONFIGURATION_OVERRIDES = {
    "monitoringConfiguration": {
        "cloudWatchMonitoringConfiguration": {
            "logGroupName": "/emr-containers/jobs",
            "logStreamNamePrefix": "addf",
        },
        "persistentAppUI": "ENABLED",
        # NOTE(review): afbucket already ends with '/', so this logUri contains
        # a double slash ('...//joblogs') — confirm whether that is intended.
        "s3MonitoringConfiguration": {"logUri": "s3://" + afbucket + "/joblogs"},
    }
}
with DAG(
    dag_id="Citibike_Ridership_Analytics",
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=datetime(now.year, now.month, now.day, now.hour),
    schedule_interval=None,
    tags=["S3", "Citibike", "EMR on EKS", "Spark"],
) as dag:
    # Shared anchor/utility tasks.
    start = DummyOperator(task_id="start", dag=dag)
    listBucket = PythonOperator(
        task_id="list_transformed_files",
        python_callable=list_bucket,
        op_kwargs={"destbucket": DEST_BUCKET},
        dag=dag,
    )
    create_aws_conn = PythonOperator(
        task_id="try_create_aws_conn",
        python_callable=try_create_aws_conn,
        dag=dag,
    )
    # One copy/unzip task plus one EMR job-run/sensor pair per month of data.
    for i in range(1, find_max_month() + 1):
        NEW_SRC_KEY = YR + str(i).zfill(2) + SRC_KEY
        NEW_DEST_KEY = "citibike/csv/" + YR + str(i).zfill(2) + DEST_KEY
        copyAndTransformS3File = PythonOperator(
            task_id="copy_and_unzip_s3_" + str(i).zfill(2),
            python_callable=copy_and_unzip_s3,
            op_kwargs={
                "bucket": SRC_BUCKET,
                "key": NEW_SRC_KEY,
                "destbucket": DEST_BUCKET,
                "destkey": NEW_DEST_KEY,
            },
            dag=dag,
        )
        start >> copyAndTransformS3File >> listBucket
        start_job_run = EMRContainerOperator(
            task_id=f"start_citibike_ridership_analytics-{i}",
            name="citibike_analytics_run",
            virtual_cluster_id=emr_virtual_cluster_id,
            client_request_token="".join(random.choice(string.digits) for _ in range(10)),
            execution_role_arn=emr_execution_role_arn,
            release_label="emr-6.2.0-latest",
            job_driver=JOB_DRIVER,
            configuration_overrides=CONFIGURATION_OVERRIDES,
            aws_conn_id="aws_emr_on_eks",
        )
        # BUG FIX: the sensor previously pulled XCom from task_ids
        # 'start_citibike_ridership_analytics' (without the '-{i}' suffix),
        # a task id that does not exist, so job_id/virtual_cluster_id were
        # always empty. Pull from the matching per-iteration task instead.
        job_sensor = EMRContainerSensor(
            task_id=f"check_job_status-{i}",
            job_id=(
                "{{ task_instance.xcom_pull(task_ids='start_citibike_ridership_analytics-"
                + str(i)
                + "', key='return_value') }}"
            ),
            virtual_cluster_id=(
                "{{ task_instance.xcom_pull(task_ids='start_citibike_ridership_analytics-"
                + str(i)
                + "', key='virtual_cluster_id') }}"
            ),
            aws_conn_id="aws_emr_on_eks",
        )
        create_aws_conn >> listBucket >> start_job_run >> job_sensor
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/example_spark_dags/__init__.py | modules/examples/example-spark-dags/example_spark_dags/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-spark-dags/example_spark_dags/emr_eks_dag_config.py | modules/examples/example-spark-dags/example_spark_dags/emr_eks_dag_config.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# This file is populated with configurations information when the Module is deployed
# Configuration parameters are exported as module level constants
#
# Example:
# SOME_PARAMETER = 'some value'
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/docker-build/service/app/app.py | modules/examples/docker-build/service/app/app.py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello_world():
    """Root endpoint: static greeting used to verify the container serves traffic."""
    return "Hello, Docker!"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/eb-sf-batch/stack.py | modules/examples/eb-sf-batch/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk.aws_batch as batch
import aws_cdk.aws_iam as iam
from aws_cdk import Duration, NestedStack, Size, Stack, Tags
from aws_cdk import aws_ecr as ecr
from aws_cdk import aws_ecs as ecs
from aws_cdk import aws_events as events
from aws_cdk import aws_stepfunctions as stepfunctions
from aws_cdk import aws_stepfunctions_tasks as step_functions_tasks
from aws_solutions_constructs.aws_eventbridge_stepfunctions import (
EventbridgeToStepfunctions,
)
# from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
# import cdk_nag
_logger: logging.Logger = logging.getLogger(__name__)
class EventDrivenBatch(Stack):
    """CDK stack wiring a 1-minute EventBridge schedule to a Step Functions
    state machine that submits an AWS Batch (Fargate) job built from an ECR
    image."""

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        fargate_job_queue_arn: str,
        ecr_repo_name: str,
        vcpus: int,
        memory_limit_mib: int,
        **kwargs: Any,
    ) -> None:
        """Build the stack.

        Args:
            scope: Parent construct.
            id: Construct/stack identifier.
            project_name: Seed-Farmer project name (naming/tagging).
            deployment_name: Seed-Farmer deployment name (naming/tagging).
            module_name: Seed-Farmer module name (naming).
            fargate_job_queue_arn: ARN of the existing Fargate Batch job queue.
            ecr_repo_name: Suffix of the ECR repo holding the job image.
            vcpus: vCPUs per job container (int()-converted below).
            memory_limit_mib: Memory per job container in MiB (int()-converted below).
            **kwargs: Forwarded to aws_cdk.Stack.
        """
        # Env vars
        self.deployment_name = deployment_name
        self.module_name = module_name
        super().__init__(
            scope,
            id,
            description="This stack deploys Cron Based Eventbridge which triggers Stepfunctions further triggering AWS Batch",  # noqa: E501
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        # Batch Resources: task/execution role with ECR pull + broad S3 read.
        role = iam.Role(
            self,
            f"{dep_mod}-AWSBatchRole",
            assumed_by=iam.ServicePrincipal(service="ecs-tasks.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_managed_policy_arn(
                    self,
                    "MetricsAmazonEC2ContainerRegistryReadOnly",
                    f"arn:{self.partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
                ),
                iam.ManagedPolicy.from_managed_policy_arn(
                    self,
                    "MetricsAmazonEC2ContainerServiceforEC2Role",
                    f"arn:{self.partition}:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
                ),
            ],
            inline_policies={
                "S3Read": iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=["s3:GetObject"],
                            resources=["*"],
                        )
                    ]
                )
            },
        )
        repository = ecr.Repository.from_repository_name(self, id=id, repository_name=f"{dep_mod}-{ecr_repo_name}")
        img = ecs.EcrImage.from_ecr_repository(repository=repository, tag="latest")
        # BUG FIX (clarity): this local was previously named `definition` and was
        # later reassigned to the Step Functions chain, shadowing the Batch job
        # definition it still referenced above — the two are now distinct names.
        job_definition = batch.EcsJobDefinition(
            self,
            f"{dep_mod}-JobDefinition",
            job_definition_name=f"{project_name}-{deployment_name}-Job-Definition",
            retry_attempts=1,
            container=batch.EcsFargateContainerDefinition(
                self,
                f"{dep_mod} Container Definition",
                environment={"AWS_REGION": NestedStack.of(self).region},
                cpu=int(vcpus),
                memory=Size.mebibytes(int(memory_limit_mib)),
                execution_role=role,
                job_role=role,
                image=img,
            ),
        )
        # Step functions definition: submit -> wait 30s -> succeed.
        submit_metrics_job = step_functions_tasks.BatchSubmitJob(
            self,
            f"{dep_mod}-Batchjob",
            job_name=f"{project_name}-{deployment_name}-Job",
            job_queue_arn=fargate_job_queue_arn,
            job_definition_arn=job_definition.job_definition_arn,
        )
        wait_job = stepfunctions.Wait(
            self,
            "Wait 30 Seconds",
            time=stepfunctions.WaitTime.duration(Duration.seconds(30)),
        )
        succeed_job = stepfunctions.Succeed(self, "Succeeded", comment="AWS Batch Job succeeded")
        sfn_definition = submit_metrics_job.next(wait_job).next(succeed_job)  # type: ignore
        # EventBridge rule (every minute) -> state machine.
        self.eventbridge_sfn = EventbridgeToStepfunctions(
            self,
            f"{project_name}-{deployment_name}-eb-sf-batch",
            state_machine_props=stepfunctions.StateMachineProps(definition=sfn_definition),  # type: ignore
            event_rule_props=events.RuleProps(schedule=events.Schedule.rate(Duration.minutes(1))),
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/eb-sf-batch/app.py | modules/examples/eb-sf-batch/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import EventDrivenBatch
# Seed-Farmer supplies these identifiers via environment variables.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
vcpus = os.getenv(_param("VCPUS"))  # required
memory_limit_mib = os.getenv(_param("MEMORY_LIMIT_MIB"))  # required
fargate_job_queue_arn = os.getenv(_param("FARGATE_JOB_QUEUE_ARN"))  # required
ecr_repo_name = os.getenv(_param("ECR_REPO_NAME"))  # required
# Fail fast on the parameters with no usable default.
if not fargate_job_queue_arn:
    raise ValueError("Batch Queue Configuration is missing.")
if not ecr_repo_name:
    raise ValueError("ECR Repository Name is missing.")
# NOTE(review): vcpus/memory_limit_mib are not validated and may be None here;
# the stack int()-converts them — confirm they are always supplied.
app = App()
stack = EventDrivenBatch(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    fargate_job_queue_arn=fargate_job_queue_arn,
    ecr_repo_name=ecr_repo_name,
    vcpus=vcpus,  # type: ignore
    memory_limit_mib=memory_limit_mib,  # type: ignore
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Export the state machine ARN as module metadata.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "StateMachine": stack.eventbridge_sfn.state_machine.state_machine_arn,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/eb-sf-batch/tests/test_stack.py | modules/examples/eb-sf-batch/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the environment the stack expects and drop any cached `stack` import."""
    defaults = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
    }
    os.environ.update(defaults)
    # Force a fresh import of `stack` for each test.
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults):
    """Synthesize EventDrivenBatch and verify it creates exactly one Batch job definition."""
    import stack
    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    event_driven_batch = stack.EventDrivenBatch(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        fargate_job_queue_arn="arn:foobar",
        ecr_repo_name="myrepo",
        vcpus=2,
        memory_limit_mib=2048,
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(event_driven_batch)
    template.resource_count_is("AWS::Batch::JobDefinition", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/eb-sf-batch/tests/__init__.py | modules/examples/eb-sf-batch/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/eb-sf-batch/images/src.py | modules/examples/eb-sf-batch/images/src.py | import pandas as pd
# Build a small demo DataFrame of players and jersey numbers, then print it
# between separator bars so the Batch job's CloudWatch logs are easy to spot.
data = [["Cristiano Ronlado", 1], ["Juan Mata", 2], ["Bruno Fernandez", 3]]
df = pd.DataFrame(data, columns=["Name", "Jersey Number"])
separator = "-" * 50
print(separator)
print("StepFunctions - eventbridge - AWS Batch workflow tested")
print(separator)
print(df)
print(separator)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-tf-prereqs/stack.py | modules/examples/example-tf-prereqs/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, cast
import aws_cdk
import cdk_nag
from aws_cdk import Aspects, Stack, Tags, aws_dynamodb, aws_s3
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class TfPreReqs(Stack):  # type: ignore
    """CDK stack provisioning the Terraform backend prerequisites: a versioned
    S3 bucket for remote state and a DynamoDB table for state locks."""

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        hash: str,  # NOTE: shadows the builtin `hash`; kept for interface compatibility
        tf_s3_backend_encryption_type: str,
        tf_s3_backend_retention_type: str,
        tf_ddb_retention_type: str,
        **kwargs: Any,
    ) -> None:
        """Build the stack.

        Args:
            scope: Parent construct.
            id: Construct/stack identifier.
            project_name: Seed-Farmer project name (naming/tagging).
            deployment_name: Seed-Farmer deployment name (naming/tagging).
            module_name: Seed-Farmer module name.
            hash: Deployment hash appended to the bucket name for uniqueness.
            tf_s3_backend_encryption_type: "KMS" for KMS-managed keys, anything
                else falls back to S3-managed (SSE-S3) encryption.
            tf_s3_backend_retention_type: "RETAIN" keeps the bucket on stack
                delete, anything else destroys it.
            tf_ddb_retention_type: Same semantics for the lock table.
            **kwargs: Forwarded to aws_cdk.Stack.
        """
        super().__init__(
            scope,
            id,
            description="This stack deploys Storage resources",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")
        # S3 bucket for storing the remote state of Terraform
        self.tf_state_s3bucket = aws_s3.Bucket(
            self,
            removal_policy=aws_cdk.RemovalPolicy.RETAIN
            if tf_s3_backend_retention_type.upper() == "RETAIN"
            else aws_cdk.RemovalPolicy.DESTROY,
            bucket_name=f"{project_name}-{deployment_name}-tfstate-bucket-{hash}",
            id="tf-state-bucket",
            encryption=aws_s3.BucketEncryption.KMS_MANAGED
            if tf_s3_backend_encryption_type.upper() == "KMS"
            else aws_s3.BucketEncryption.S3_MANAGED,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            enforce_ssl=True,
            versioned=True,
        )
        # DDB Table for storing the LockIDs of Terraform
        # "LockID" is the partition key name the Terraform S3 backend requires.
        part_key = "LockID"
        self.tf_ddb_lock_table = aws_dynamodb.Table(
            self,
            "tf_ddb_lock_table",
            table_name=f"{project_name}-{deployment_name}-tf-ddb-lock-table",
            partition_key=aws_dynamodb.Attribute(name=part_key, type=aws_dynamodb.AttributeType.STRING),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=aws_cdk.RemovalPolicy.RETAIN
            if tf_ddb_retention_type.upper() == "RETAIN"
            else aws_cdk.RemovalPolicy.DESTROY,
        )
        # Run cdk-nag checks with deliberate suppressions for this demo module.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        suppressions = [
            NagPackSuppression(
                **{
                    "id": "AwsSolutions-S1",
                    "reason": "Logging has been disabled for demo purposes",
                }
            ),
            NagPackSuppression(
                **{
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to ADDF resources",
                }
            ),
        ]
        NagSuppressions.add_stack_suppressions(self, suppressions)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-tf-prereqs/app.py | modules/examples/example-tf-prereqs/app.py | import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import TfPreReqs
# Seed-Farmer injects these values via environment variables at deploy time.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
hash = os.getenv("SEEDFARMER_HASH", "")
# Backend configuration with safe defaults: SSE-S3 encryption, destroy-on-delete.
tf_s3_backend_encryption_type = os.getenv("SEEDFARMER_PARAMETER_S3_ENCRYPTION_TYPE", "SSE")
tf_s3_backend_retention_type = os.getenv("SEEDFARMER_PARAMETER_S3_RETENTION_TYPE", "DESTROY")
tf_ddb_retention_type = os.getenv("SEEDFARMER_PARAMETER_DDB_RETENTION_TYPE", "DESTROY")
app = App()
stack = TfPreReqs(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    hash=hash,
    tf_s3_backend_encryption_type=tf_s3_backend_encryption_type,
    tf_s3_backend_retention_type=tf_s3_backend_retention_type,
    tf_ddb_retention_type=tf_ddb_retention_type,
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Publish the backend resource names as module metadata for downstream modules.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "TfStateBucketName": stack.tf_state_s3bucket.bucket_name,
            "TfLockTable": stack.tf_ddb_lock_table.table_name,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-tf-prereqs/tests/test_stack.py | modules/examples/example-tf-prereqs/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-tf-prereqs/tests/__init__.py | modules/examples/example-tf-prereqs/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/stack.py | modules/examples/example-dags/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, Optional, cast
import aws_cdk.aws_iam as aws_iam
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class DagIamRole(Stack):
    """Stack that provisions the IAM role example Airflow DAGs assume.

    The role is assumable only by the MWAA execution role, carries a
    read-only EC2 describe inline policy, and optionally attaches a
    bucket managed policy and a permissions boundary.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        mwaa_exec_role: str,
        bucket_policy_arn: Optional[str] = None,
        permission_boundary_arn: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        # Keep deployment coordinates on the instance for later reference.
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        self.mwaa_exec_role = mwaa_exec_role

        super().__init__(
            scope,
            id,
            description="This stack deploys Example DAGs resources",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")

        # Inline policy: read-only EC2 describe access for DAG tasks.
        describe_doc = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=["ec2:Describe*"],
                    effect=aws_iam.Effect.ALLOW,
                    resources=["*"],
                )
            ]
        )

        # Optional customer-supplied bucket policy, attached only when provided.
        attached_policies = []
        if bucket_policy_arn:
            attached_policies.append(
                aws_iam.ManagedPolicy.from_managed_policy_arn(self, "bucket-policy", bucket_policy_arn)
            )

        # Optional permissions boundary.
        boundary = None
        if permission_boundary_arn:
            boundary = aws_iam.ManagedPolicy.from_managed_policy_arn(
                self,
                f"perm-boundary-{self.deployment_name}-{self.module_name}",
                permission_boundary_arn,
            )

        self.dag_role = aws_iam.Role(
            self,
            f"dag-role-{self.deployment_name}-{self.module_name}",
            assumed_by=aws_iam.ArnPrincipal(self.mwaa_exec_role),
            inline_policies={"DagPolicyDocument": describe_doc},
            managed_policies=attached_policies,
            permissions_boundary=boundary,
            role_name=f"{self.project_name}-{self.deployment_name}-{self.module_name}-dag-role",
            path="/",
        )

        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            [
                {  # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced describe only",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/app.py | modules/examples/example-dags/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import DagIamRole
# SeedFarmer-provided deployment coordinates; empty string when unset.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
# ARN of the MWAA execution role allowed to assume the DAG role.
mwaa_exec_role = os.getenv("SEEDFARMER_PARAMETER_MWAA_EXEC_ROLE_ARN", "")
# Optional: bucket managed-policy and permissions-boundary ARNs (None when unset).
bucket_policy_arn = os.getenv("SEEDFARMER_PARAMETER_BUCKET_POLICY_ARN")
permission_boundary_arn = os.getenv("SEEDFARMER_PERMISSION_BOUNDARY_ARN")
app = App()
stack = DagIamRole(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    mwaa_exec_role=mwaa_exec_role,
    bucket_policy_arn=bucket_policy_arn,
    permission_boundary_arn=permission_boundary_arn,
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Export the created role ARN as SeedFarmer module metadata.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string({"DagRoleArn": stack.dag_role.role_arn}),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/tests/test_stack.py | modules/examples/example-dags/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/tests/__init__.py | modules/examples/example-dags/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/example_dags/dag_config.py | modules/examples/example-dags/example_dags/dag_config.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/example_dags/sampledag.py | modules/examples/example-dags/example_dags/sampledag.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from datetime import timedelta
import boto3
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
import example_dags.dag_config as cf
# DAG id is this file's name with the ".py" suffix stripped.
DAG_ID = os.path.basename(__file__).replace(".py", "")
# Default task arguments shared by all tasks in this DAG.
DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}
def triggerDagFn(**kwargs):
    """Assume the DAG role via STS and list EC2 instances with the temporary credentials.

    Returns True on completion; output is printed for inspection in the task log.
    """
    sts = boto3.client("sts")
    # Exchange the worker's identity for the dedicated DAG role credentials.
    assumed = sts.assume_role(
        RoleArn=cf.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )
    creds = assumed["Credentials"]
    scoped_session = Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )

    # Show which principal the assumed-role session resolves to.
    new_sts_client = scoped_session.client("sts")
    print(f"The new client is : {new_sts_client.get_caller_identity()}")

    # Exercise the role's ec2:Describe* permission.
    response = scoped_session.client("ec2").describe_instances()
    print(f"response is: {response}")
    return True
# Single-task DAG: runs triggerDagFn once, shortly after deployment.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=days_ago(1),
    schedule_interval="@once",
) as dag:
    triggerDag = PythonOperator(task_id="triggerDag", python_callable=triggerDagFn, provide_context=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/examples/example-dags/example_dags/__init__.py | modules/examples/example-dags/example_dags/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/core/metadata-storage/stack.py | modules/core/metadata-storage/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, RemovalPolicy, Stack, Tags
from aws_cdk import aws_dynamodb as dynamo
from aws_cdk import aws_glue_alpha as glue_alpha
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class MetadataStorageStack(Stack):
    """Provision rosbag metadata storage: two DynamoDB tables and a Glue database.

    Exposes the created resources and their key names as instance attributes
    for consumption by the deployment's CfnOutput metadata.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        scene_table_suffix: str,
        bagfile_table_suffix: str,
        glue_db_suffix: str,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        prefix = f"{project_name}-{deployment_name}-{module_name}"
        # Tag values may not exceed 256 characters; truncate defensively.
        prefix = prefix[:256]

        super().__init__(scope, id, description=stack_description, **kwargs)
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=prefix)

        # Bag-file inventory table, keyed by the bag file's prefix.
        bagfile_pk = "bag_file_prefix"
        bagfile_table = dynamo.Table(
            self,
            "dynamobagfiletable",
            table_name=f"{prefix}-{bagfile_table_suffix}",
            partition_key=dynamo.Attribute(name=bagfile_pk, type=dynamo.AttributeType.STRING),
            billing_mode=dynamo.BillingMode.PAY_PER_REQUEST,
            removal_policy=RemovalPolicy.DESTROY,
            point_in_time_recovery=True,
        )

        # Scene metadata table; stream enabled for downstream consumers.
        scene_pk = "bag_file"
        scene_sk = "scene_id"
        scene_table = dynamo.Table(
            self,
            "dynamotablescenes",
            table_name=f"{prefix}-{scene_table_suffix}",
            partition_key=dynamo.Attribute(name=scene_pk, type=dynamo.AttributeType.STRING),
            sort_key=dynamo.Attribute(name=scene_sk, type=dynamo.AttributeType.STRING),
            billing_mode=dynamo.BillingMode.PAY_PER_REQUEST,
            removal_policy=RemovalPolicy.DESTROY,
            point_in_time_recovery=True,
            stream=dynamo.StreamViewType.NEW_AND_OLD_IMAGES,
        )

        # Glue database for catalogued metadata.
        catalog_db = glue_alpha.Database(
            self,
            "glue_db",
            database_name=f"{prefix}-{glue_db_suffix}",
        )

        # Surface resources and key names for the module's metadata output.
        self.rosbag_bagfile_table = bagfile_table
        self.rosbag_bagfile_partition_key = bagfile_pk
        self.rosbag_scene_table = scene_table
        self.rosbag_scene_table_stream_arn = scene_table.table_stream_arn
        self.rosbag_scene_p_key = scene_pk
        self.rosbag_scene_sort_key = scene_sk
        self.glue_db = catalog_db

        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks(verbose=True))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/core/metadata-storage/app.py | modules/core/metadata-storage/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import MetadataStorageStack
# Project vars
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# App Env vars
# Required module parameters: fail fast at synth time when any is missing.
scene_suffix = os.getenv(_param("ROSBAG_SCENE_TABLE_SUFFIX"))
if not scene_suffix:
    raise ValueError("ROSBAG_SCENE_TABLE_SUFFIX is not populated ")
glue_db_suffix = os.getenv(_param("GLUE_DB_SUFFIX"))
if not glue_db_suffix:
    raise ValueError("GLUE_DB_SUFFIX is not populated")
bagfile_suffix = os.getenv(_param("ROSBAG_BAGFILE_TABLE_SUFFIX"))
if not bagfile_suffix:
    raise ValueError("ROSBAG_BAGFILE_TABLE_SUFFIX is not populated")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Prefers SeedFarmer solution metadata (id/name[/version]) when configured;
    otherwise falls back to a project-based default.
    """
    soln_id = os.getenv(_param("SOLUTION_ID"), None)
    soln_name = os.getenv(_param("SOLUTION_NAME"), None)
    soln_version = os.getenv(_param("SOLUTION_VERSION"), None)

    if soln_id and soln_name:
        if soln_version:
            return f"({soln_id}) {soln_name}. Version {soln_version}"
        return f"({soln_id}) {soln_name}"
    return f"{project_name} - Metadata Storage Module"
app = App()
stack = MetadataStorageStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    scene_table_suffix=scene_suffix,
    bagfile_table_suffix=bagfile_suffix,
    glue_db_suffix=glue_db_suffix,
    stack_description=generate_description(),
)
# Publish table/database names and key attributes as module metadata.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "RosbagBagFileTable": stack.rosbag_bagfile_table.table_name,
            "RosbagBagFilePartitionKey": stack.rosbag_bagfile_partition_key,
            "RosbagSceneMetadataTable": stack.rosbag_scene_table.table_name,
            "RosbagSceneMetadataStreamArn": stack.rosbag_scene_table_stream_arn,
            "RosbagSceneMetadataPartitionKey": stack.rosbag_scene_p_key,
            "RosbagSceneMetadataSortKey": stack.rosbag_scene_sort_key,
            "GlueDBName": stack.glue_db.database_name,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/core/metadata-storage/tests/test_app.py | modules/core/metadata-storage/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Set every env var app.py reads, then drop any cached `app` module."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_ROSBAG_SCENE_TABLE_SUFFIX"] = "scene-suffix"
    os.environ["SEEDFARMER_PARAMETER_GLUE_DB_SUFFIX"] = "glue-db-suffix"
    os.environ["SEEDFARMER_PARAMETER_ROSBAG_BAGFILE_TABLE_SUFFIX"] = "ros-table-suffix"
    # Unload the app import so that subsequent tests don't reuse
    # the module (app.py runs its validation at import time).
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    """Smoke test: app.py imports cleanly when all required env vars are set."""
    import app  # noqa: F401
def test_scene_suffix(stack_defaults):
    """Importing app.py without the scene-table suffix must raise."""
    del os.environ["SEEDFARMER_PARAMETER_ROSBAG_SCENE_TABLE_SUFFIX"]

    with pytest.raises(Exception):
        import app  # noqa: F401

    # Fix: the variable was deleted above, so the previous
    # `os.environ[...] == "scene-suffix"` lookup raised KeyError and broke
    # the test; assert absence instead.
    assert os.getenv("SEEDFARMER_PARAMETER_ROSBAG_SCENE_TABLE_SUFFIX") is None
def test_glue_db_suffix(stack_defaults):
    """Importing app.py without the Glue DB suffix must raise."""
    del os.environ["SEEDFARMER_PARAMETER_GLUE_DB_SUFFIX"]

    with pytest.raises(Exception):
        import app  # noqa: F401

    # Fix: the key was deleted above; the old equality check against
    # os.environ[...] raised KeyError. Assert absence instead.
    assert os.getenv("SEEDFARMER_PARAMETER_GLUE_DB_SUFFIX") is None
def test_bagfile_suffix(stack_defaults):
    """Importing app.py without the bagfile-table suffix must raise."""
    del os.environ["SEEDFARMER_PARAMETER_ROSBAG_BAGFILE_TABLE_SUFFIX"]

    with pytest.raises(Exception):
        import app  # noqa: F401

    # Fix: the key was deleted above; the old equality check against
    # os.environ[...] raised KeyError. Assert absence instead.
    assert os.getenv("SEEDFARMER_PARAMETER_ROSBAG_BAGFILE_TABLE_SUFFIX") is None
def test_solution_description(stack_defaults):
    """generate_description() embeds id, name and version when all three are set."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app
    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """generate_description() omits the version clause when version is unset."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # Fix: pop() tolerates the variable being absent, so this test no longer
    # fails with KeyError when it runs before/without test_solution_description
    # (which is the only place SOLUTION_VERSION is ever set).
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)

    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/core/metadata-storage/tests/test_stack.py | modules/core/metadata-storage/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Provide CDK account/region env vars and drop any cached `stack` module."""
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Unload the app import so that subsequent tests don't reuse
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_synthesize_stack(stack_defaults):
    """Synthesize MetadataStorageStack and count its core resources."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    deployment_name = "test-deployment"
    module_name = "test-module"

    metadata_storage_stack = stack.MetadataStorageStack(
        scope=app,
        id=f"{project_name}-{deployment_name}-{module_name}",
        project_name=project_name,
        deployment_name=deployment_name,
        module_name=module_name,
        scene_table_suffix="scene-suffix",
        # Fix: the bagfile/glue values were swapped relative to their
        # parameter names ("glue-db-suffix" on bagfile and vice versa),
        # which made the synthesized names misleading to read.
        bagfile_table_suffix="ros-table-suffix",
        glue_db_suffix="glue-db-suffix",
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(metadata_storage_stack)

    template.resource_count_is("AWS::DynamoDB::Table", 2)
    template.resource_count_is("AWS::Glue::Database", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/core/metadata-storage/tests/__init__.py | modules/core/metadata-storage/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/eureka/stack.py | modules/simulations/eureka/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from string import Template
from typing import Any, cast
import yaml
from aws_cdk import Duration, Environment, Stack, Tags
from aws_cdk import aws_ecr as ecr
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from aws_cdk import aws_sqs as sqs
from aws_cdk.aws_ecr_assets import DockerImageAsset
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_ecr_deployment import DockerImageName, ECRDeployment
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
project_dir = os.path.dirname(os.path.abspath(__file__))
ROLE_NAME = "addf-eureka-simulation-role"
APPLICATION_IMAGE_NAME = "ubuntu-ros2"
class EurekaStack(Stack):
    """Eureka simulation stack.

    Applies FSx-for-Lustre static-provisioning manifests to an existing EKS
    cluster, creates the IAM role assumed by the simulation service accounts,
    an SQS work queue, and publishes the application container image to ECR.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        stack_description: str,
        eks_cluster_name: str,
        eks_cluster_admin_role_arn: str,
        eks_oidc_arn: str,
        eks_cluster_open_id_connect_issuer: str,
        simulation_data_bucket_name: str,
        sqs_name: str,
        fsx_volume_handle: str,
        fsx_mount_point: str,
        application_ecr_name: str,
        env: Environment,
        **kwargs: Any,
    ) -> None:
        super().__init__(scope, id, description=stack_description, **kwargs)
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        dep_mod = f"{self.project_name}-{self.deployment_name}-{self.module_name}"
        dep_mod = dep_mod[:64]  # keep derived ids/tag values bounded
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=dep_mod)

        # Import the pre-existing cluster; kubectl access goes via the admin role.
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_cluster_admin_role_arn,
            open_id_connect_provider=provider,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )

        # Render the FSx static-provisioning template and apply every YAML document.
        manifest = self.get_fsx_static_provisioning_manifest(fsx_volume_handle, fsx_mount_point, env)
        manifest_docs = list(yaml.load_all(manifest, Loader=yaml.FullLoader))
        for index, doc in enumerate(manifest_docs, start=1):
            eks_cluster.add_manifest(f"Eureka{index}", doc)

        self.iam_role_arn = self.create_simulation_role(
            eks_cluster_open_id_connect_issuer,
            eks_oidc_arn,
            simulation_data_bucket_name,
        )
        self.sqs_url = self.create_sqs(sqs_name, env)
        self.application_image_uri = self.build_application_image(application_ecr_name)

        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to resources",
                    }
                ),
            ],
        )

    def create_simulation_role(
        self,
        eks_cluster_open_id_connect_issuer: str,
        eks_oidc_arn: str,
        simulation_data_bucket_name: str,
    ) -> str:
        """Create the IRSA role assumed by any service account in the cluster.

        Returns the role ARN.
        """
        role = iam.Role(
            self,
            ROLE_NAME,
            assumed_by=iam.FederatedPrincipal(
                eks_oidc_arn,
                {"StringLike": {f"{eks_cluster_open_id_connect_issuer}:sub": "system:serviceaccount:*"}},
                "sts:AssumeRoleWithWebIdentity",
            ),
        )
        # S3 access to the simulation data bucket.
        # Bug fix: the previous f-strings used "${simulation_data_bucket_name}",
        # which left a literal "$" in the ARNs (f-strings do not use "$") and
        # therefore never matched the real bucket.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:PutObject",
                    "s3:GetObject",
                    "s3:AbortMultipartUpload",
                    "s3:ListBucket",
                    "s3:GetObjectVersion",
                    "s3:ListMultipartUploadParts",
                ],
                resources=[
                    f"arn:{self.partition}:s3:::{simulation_data_bucket_name}/*",
                    f"arn:{self.partition}:s3:::{simulation_data_bucket_name}",
                ],
            )
        )
        # Queue access plus FSx lifecycle management.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "sqs:*",
                    "fsx:CreateFileSystem",
                    "fsx:DeleteFileSystem",
                    "fsx:DescribeFileSystems",
                    "fsx:TagResource",
                ],
                resources=["*"],
            )
        )
        # Bedrock model invocation.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "bedrock:ListFoundationModels",
                    "bedrock:InvokeModelWithResponseStream",
                    "bedrock:InvokeModel",
                    "bedrock:InvokeAgent",
                ],
                resources=["*"],
            )
        )
        # Allow managing the FSx/S3 data-source service-linked role.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "iam:CreateServiceLinkedRole",
                    "iam:AttachRolePolicy",
                    "iam:PutRolePolicy",
                ],
                resources=[
                    f"arn:{self.partition}:iam::*:role/aws-service-role/s3.data-source.lustre.fsx.amazonaws.com/*"
                ],
            )
        )
        # Service-linked-role creation restricted to the FSx service.
        fsx_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "iam:CreateServiceLinkedRole",
            ],
            resources=["*"],
        )
        fsx_policy.add_conditions({"StringLike": {"iam:AWSServiceName": ["fsx.amazonaws.com"]}})
        role.add_to_principal_policy(fsx_policy)
        return role.role_arn

    def build_application_image(self, application_ecr_name: str) -> str:
        """Build the local ./src Docker image and copy it into the module's ECR repo.

        Returns the destination image URI (tagged APPLICATION_IMAGE_NAME).
        """
        local_image = DockerImageAsset(
            self,
            "ImageExtractionDockerImage",
            directory=os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"),
        )
        repo = ecr.Repository.from_repository_name(
            self, id=f"ecr-{application_ecr_name}", repository_name=application_ecr_name
        )
        image_uri = f"{repo.repository_uri}:{APPLICATION_IMAGE_NAME}"
        ECRDeployment(
            self,
            "RoboticsImageUri",
            src=DockerImageName(local_image.image_uri),
            dest=DockerImageName(image_uri),
        )
        return image_uri

    def create_sqs(self, sqs_name: str, env: Environment) -> str:
        """Create the simulation work queue and return its URL.

        Bug fix: the queue is now created with an explicit queue_name; previously
        CDK generated a hashed physical name, so the URL returned here pointed at
        a queue that did not exist.
        """
        sqs.Queue(self, sqs_name, queue_name=sqs_name, visibility_timeout=Duration.seconds(60))
        return f"https://sqs.{env.region}.amazonaws.com/{env.account}/{sqs_name}"

    def get_fsx_static_provisioning_manifest(
        self,
        volume_handle: str,
        mount_point: str,
        env: Environment,
    ) -> str:
        """Read the FSx static-provisioning template and substitute placeholders."""
        template_path = os.path.join(project_dir, "manifests/fsx_static_provisioning.yaml")
        # Fix: use a context manager so the file handle is closed (it leaked before).
        with open(template_path, "r") as handle:
            t = Template(handle.read())
        return t.substitute(
            VOLUME_HANDLE=volume_handle,
            MOUNT_NAME=mount_point,
            REGION=env.region,
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/eureka/app.py | modules/simulations/eureka/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
from aws_cdk import App, CfnOutput, Environment
from stack import EurekaStack
# Project specific
# SeedFarmer deployment coordinates (None when unset).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME")
module_name = os.getenv("SEEDFARMER_MODULE_NAME")
# len(f"{p}-{d}") > 36 means p+d together exceed 35 characters (plus the hyphen).
if len(f"{project_name}-{deployment_name}") > 36:
    raise ValueError("This module cannot support a project+deployment name character length greater than 35")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters supplied by SeedFarmer; all default to "" when unset.
eks_cluster_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")
eks_cluster_open_id_connect_issuer = os.getenv(_param("EKS_CLUSTER_OPEN_ID_CONNECT_ISSUER"), "")
application_ecr_name = os.getenv(_param("APPLICATION_ECR_NAME"), "")
sqs_name = os.getenv(_param("SQS_NAME"), "")
fsx_volume_handle = os.getenv(_param("FSX_VOLUME_HANDLE"), "")
fsx_mount_point = os.getenv(_param("FSX_MOUNT_POINT"), "")
data_bucket_name = os.getenv(_param("DATA_BUCKET_NAME"), "")
def generate_description() -> str:
    """Return the stack description, embedding solution metadata when present.

    Falls back to a static default when no solution id/name is configured.
    """
    soln_id = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    soln_name = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    soln_version = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)

    if soln_id and soln_name:
        return (
            f"({soln_id}) {soln_name}. Version {soln_version}"
            if soln_version
            else f"({soln_id}) {soln_name}"
        )
    return "My Module Default Description"
app = App()
eureka_stack = EurekaStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=cast(str, project_name),
    deployment_name=cast(str, deployment_name),
    module_name=cast(str, module_name),
    stack_description=generate_description(),
    eks_cluster_name=eks_cluster_name,
    eks_cluster_admin_role_arn=eks_cluster_admin_role_arn,
    eks_oidc_arn=eks_oidc_arn,
    eks_cluster_open_id_connect_issuer=eks_cluster_open_id_connect_issuer,
    simulation_data_bucket_name=data_bucket_name,
    sqs_name=sqs_name,
    fsx_volume_handle=fsx_volume_handle,
    fsx_mount_point=fsx_mount_point,
    application_ecr_name=application_ecr_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Publish the role ARN, image URI and queue URL as module metadata.
CfnOutput(
    scope=eureka_stack,
    id="metadata",
    value=eureka_stack.to_json_string(
        {
            "IamRoleArn": eureka_stack.iam_role_arn,
            "ApplicationImageUri": eureka_stack.application_image_uri,
            "SqsUrl": eureka_stack.sqs_url,
        }
    ),
)
app.synth()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/eureka/tests/test_app.py | modules/simulations/eureka/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Populate every env var eureka's app.py reads, then drop the cached module."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "12345678"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-2"
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN"] = "arn:aws:iam:us-east-1:1234567890:role/test-role"
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME"] = "test_cluster"
    os.environ["SEEDFARMER_PARAMETER_EKS_OIDC_ARN"] = (
        "arn:aws:eks:us-east-1:1234567890:oidc-provider/oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/test-ocid"
    )
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_OPEN_ID_CONNECT_ISSUER"] = "test_open_id_connect_issuer"
    # Fix: app.py reads APPLICATION_ECR_NAME; the fixture previously set
    # APPLICATION_ECR_URI, which app.py never consumes, so the stack was
    # exercised with an empty repository name.
    os.environ["SEEDFARMER_PARAMETER_APPLICATION_ECR_NAME"] = "test-repo"
    os.environ["SEEDFARMER_PARAMETER_SQS_NAME"] = "test-message-queue"
    os.environ["SEEDFARMER_PARAMETER_FSX_VOLUME_HANDLE"] = "fs-12345678"
    os.environ["SEEDFARMER_PARAMETER_FSX_MOUNT_POINT"] = "mnttest"
    os.environ["SEEDFARMER_PARAMETER_DATA_BUCKET_NAME"] = "test-data-bucket"

    # Drop the cached module so each test re-runs app.py's import-time logic.
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    """Smoke test: eureka's app.py imports cleanly with the fixture env vars."""
    import app  # noqa: F401
def test_project_deployment_name_length(stack_defaults):
    """app.py must reject project+deployment names longer than 35 characters."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project-incredibly"
    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    assert "module cannot support a project+deployment name character length greater than" in str(e)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/eureka/tests/test_stack.py | modules/simulations/eureka/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk import Environment
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Provide CDK account/region env vars and drop any cached `stack` module."""
    os.environ["CDK_DEFAULT_ACCOUNT"] = "1234567890"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Unload the app import so that subsequent tests don't reuse
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_app(stack_defaults):
    """Synthesize EurekaStack and count its core resources."""
    import stack

    app = cdk.App()

    # Fix: the result was previously bound to a local named `stack`, shadowing
    # the imported `stack` module within this test.
    eureka_stack = stack.EurekaStack(
        scope=app,
        id="test-proj",
        project_name="test_proj",
        deployment_name="test_deploy",
        module_name="test_module",
        stack_description="this_is_test_stack",
        eks_cluster_name="test_cluster",
        eks_cluster_admin_role_arn="arn:aws:iam:us-east-1:1234567890:role/test-role",
        eks_oidc_arn="arn:aws:eks:us-east-1:1234567890:oidc-provider/oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/test-ocid",
        eks_cluster_open_id_connect_issuer="test_open_id_connect_issuer",
        simulation_data_bucket_name="test-bucket",
        sqs_name="message-queue",
        fsx_volume_handle="fs-12345678",
        fsx_mount_point="mntmount",
        application_ecr_name="docker.ecr.test_image_name",
        env=Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )

    template = Template.from_stack(eureka_stack)

    template.resource_count_is("AWS::IAM::Role", 2)
    template.resource_count_is("AWS::IAM::Policy", 2)
    template.resource_count_is("AWS::SQS::Queue", 1)
    template.resource_count_is("Custom::CDKBucketDeployment", 1)
    template.resource_count_is("Custom::AWSCDK-EKS-KubernetesResource", 2)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/eureka/tests/__init__.py | modules/simulations/eureka/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/stack.py | modules/simulations/k8s-managed/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, Stack, Tags, aws_eks, aws_iam
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class SimulationDags(Stack):
    """ADDF stack that lets MWAA simulation DAGs run jobs on an existing EKS cluster.

    Provisions:
      * an IAM "DAG role" assumable by the MWAA execution role, with access
        limited to this module's SQS queues and ECR repositories;
      * a Kubernetes namespace and service account on the imported cluster,
        whose IAM role is also made assumable by the MWAA execution role;
      * namespace-scoped and cluster-scoped RBAC bindings for that identity.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        mwaa_exec_role: str,
        eks_cluster_name: str,
        eks_admin_role_arn: str,
        eks_openid_connect_provider_arn: str,
        **kwargs: Any,
    ) -> None:
        # Keep the deployment context on the instance for name construction below.
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        self.mwaa_exec_role = mwaa_exec_role
        super().__init__(
            scope,
            id,
            description="(SO9154) Autonomous Driving Data Framework (ADDF) - k8s-managed",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")

        # DAG IAM role policy: broad SQS/ECR actions, but only on resources whose
        # names carry the <project>-<deployment>-<module> prefix.
        policy_statements = [
            aws_iam.PolicyStatement(
                actions=["sqs:*"],
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:sqs:{self.region}:{self.account}:{project_name}-{deployment_name}-{module_name}*"
                ],
            ),
            aws_iam.PolicyStatement(
                actions=["ecr:*"],
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{project_name}-{deployment_name}-{module_name}*"
                ],
            ),
        ]
        dag_document = aws_iam.PolicyDocument(statements=policy_statements)
        r_name = f"{self.project_name}-{self.deployment_name}-{self.module_name}-dag-role"
        # Role assumed by the DAGs from MWAA; trusts the MWAA execution role.
        self.dag_role = aws_iam.Role(
            self,
            f"dag-role-{self.deployment_name}-{self.module_name}",
            assumed_by=aws_iam.ArnPrincipal(self.mwaa_exec_role),
            inline_policies={"DagPolicyDocument": dag_document},
            role_name=r_name,
            path="/",
        )

        # Import the pre-existing EKS cluster (this stack does not create it).
        provider = aws_eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, "Provider", eks_openid_connect_provider_arn
        )
        cluster = aws_eks.Cluster.from_cluster_attributes(
            self,
            f"eks-{self.deployment_name}-{self.module_name}",
            cluster_name=eks_cluster_name,
            open_id_connect_provider=provider,
            kubectl_role_arn=eks_admin_role_arn,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )

        # Dedicated namespace named after the module.
        namespace = cluster.add_manifest(
            "namespace",
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {"name": module_name},
            },
        )
        # IRSA service account; its IAM role receives the same SQS/ECR policy
        # and is additionally made assumable by the MWAA execution role.
        service_account = cluster.add_service_account("service-account", name=module_name, namespace=module_name)
        service_account.node.add_dependency(namespace)
        service_account_role: aws_iam.Role = cast(aws_iam.Role, service_account.role)
        if service_account_role.assume_role_policy:
            service_account_role.assume_role_policy.add_statements(
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=["sts:AssumeRole"],
                    principals=[aws_iam.ArnPrincipal(mwaa_exec_role)],
                )
            )
        for statement in policy_statements:
            service_account_role.add_to_policy(statement=statement)

        # Full access within the module's own namespace.
        rbac_role = cluster.add_manifest(
            "rbac-role",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "Role",
                "metadata": {"name": "module-owner", "namespace": module_name},
                "rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}],
            },
        )
        rbac_role.node.add_dependency(namespace)
        rbac_role_binding = cluster.add_manifest(
            "rbac-role-binding",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "RoleBinding",
                "metadata": {"name": module_name, "namespace": module_name},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "Role",
                    "name": "module-owner",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": module_name,
                    },
                ],
            },
        )
        rbac_role_binding.node.add_dependency(service_account)

        # Read-only access in the "default" namespace.
        rbac_role = cluster.add_manifest(
            "rbac-role-default",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "Role",
                "metadata": {"name": "default-access", "namespace": "default"},
                "rules": [
                    {
                        "apiGroups": ["*"],
                        "resources": ["*"],
                        "verbs": ["get", "list", "watch"],
                    }
                ],
            },
        )
        rbac_role.node.add_dependency(namespace)
        rbac_role_binding = cluster.add_manifest(
            "rbac-role-binding-default",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "RoleBinding",
                "metadata": {"name": "default-access", "namespace": "default"},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "Role",
                    "name": "default-access",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": module_name,
                    },
                ],
            },
        )
        rbac_role_binding.node.add_dependency(service_account)

        # Bind the pre-existing "system-access" ClusterRole to the module identity.
        # NOTE(review): the "system-access" ClusterRole itself is not created here —
        # presumably provisioned by the EKS module; verify it exists on the cluster.
        rbac_cluster_role_binding = cluster.add_manifest(
            "rbac-cluster-role-binding",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "ClusterRoleBinding",
                "metadata": {"name": f"system-access-{module_name}"},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "ClusterRole",
                    "name": "system-access",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": module_name,
                    },
                ],
            },
        )
        rbac_cluster_role_binding.node.add_dependency(service_account)
        # Exposed so app.py can emit the ARN as stack metadata.
        self.eks_service_account_role = service_account.role

        # cdk-nag checks with blanket suppressions for the wildcard policies above.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-L1",
                        "reason": "Using latest kubectl v1.29",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/app.py | modules/simulations/k8s-managed/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import SimulationDags
# Deployment context and EKS wiring supplied by seedfarmer at deploy time.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
mwaa_exec_role = os.getenv("SEEDFARMER_PARAMETER_MWAA_EXEC_ROLE", "")
eks_cluster_name = os.getenv("SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME", "")
eks_admin_role_arn = os.getenv("SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN", "")
eks_oidc_provider_arn = os.getenv("SEEDFARMER_PARAMETER_EKS_OIDC_ARN", "")

app = App()

stack = SimulationDags(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    mwaa_exec_role=mwaa_exec_role,
    eks_cluster_name=eks_cluster_name,
    eks_admin_role_arn=eks_admin_role_arn,
    eks_openid_connect_provider_arn=eks_oidc_provider_arn,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)

# Publish the created role ARNs as stack metadata for downstream modules.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "DagRoleArn": stack.dag_role.role_arn,
            "EksServiceAccountRoleArn": stack.eks_service_account_role.role_arn,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/tests/test_stack.py | modules/simulations/k8s-managed/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/tests/__init__.py | modules/simulations/k8s-managed/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/fine_parallel_mock.py | modules/simulations/k8s-managed/simulation_dags/fine_parallel_mock.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import random
import string
from datetime import timedelta
from typing import Any, Dict, Iterator, List, TypeVar
import boto3
from airflow import DAG
from airflow.models.taskinstance import TaskInstance
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
from mypy_boto3_sqs.client import SQSClient
from simulation_dags import dag_config
from simulation_dags.eks_job_operator import EksJobOperator
# Generic element type for the `chunks` helper below.
ValueType = TypeVar("ValueType")

# DAG id mirrors this file's name (without the .py suffix).
DAG_ID = os.path.basename(__file__).replace(".py", "")

DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}

logger = logging.getLogger("airflow")
logger.setLevel("WARNING")
def get_client() -> SQSClient:
    """Return an SQS client backed by temporary credentials for the DAG role."""
    assumed = boto3.client("sts").assume_role(
        RoleArn=dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )
    creds = assumed["Credentials"]
    return Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    ).client("sqs")
def create_and_populate_queue(num_items: int) -> str:
    """Create a uniquely named SQS queue and enqueue ``num_items`` mock messages.

    Returns the queue URL (published to XCom by the PythonOperator caller).
    """
    client = get_client()
    response = client.create_queue(
        QueueName=(
            f"{dag_config.PROJECT_NAME}-{dag_config.DEPLOYMENT_NAME}-{dag_config.MODULE_NAME}-"
            f"{''.join(random.choices(string.ascii_letters + string.digits, k=6))}"
        )
    )
    queue_url = response["QueueUrl"]

    # Fix: the helper yields lists, so the return annotation is
    # Iterator[List[ValueType]] (was Iterator[ValueType]); the caller now passes
    # a real list to match the declared parameter type.
    def chunks(lst: List[ValueType], n: int) -> Iterator[List[ValueType]]:
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i : i + n]

    # SQS SendMessageBatch accepts at most 10 entries per call.
    for i, chunk in enumerate(chunks(list(range(num_items)), 10)):
        entries = [{"Id": str(j), "MessageBody": f"message-{i}-{j}-{value}"} for j, value in enumerate(chunk)]
        client.send_message_batch(QueueUrl=queue_url, Entries=entries)
    return queue_url
def delete_queue(ti: TaskInstance, create_queue_task_id: str) -> None:
    """Delete the SQS queue whose URL was published by the create-queue task."""
    queue_url = ti.xcom_pull(task_ids=create_queue_task_id)
    get_client().delete_queue(QueueUrl=queue_url)
def get_job_body(create_queue_task_id: str, parallelism: int) -> Dict[str, Any]:
    """Build the Kubernetes Job manifest for the fine-grained parallel mock.

    Runs ``parallelism`` identical pods (completions == parallelism); each pod
    pairs an "sqs-manager" consumer with a mock "simulator", sharing an emptyDir
    volume. The queue URL is injected via an Airflow template that pulls XCom
    from ``create_queue_task_id``.
    """
    # NOTE(review): "apiVerson" looks like a typo for "apiVersion" (batch/v1 Job)
    # — verify against how the job operator consumes this manifest.
    return {
        "apiVerson": "batch/v1",
        "kind": "Job",
        "metadata": {},
        "spec": {
            "parallelism": parallelism,
            "completions": parallelism,
            "backoffLimit": 6,
            "template": {
                "spec": {
                    "serviceAccountName": dag_config.MODULE_NAME,
                    "restartPolicy": "OnFailure",
                    # Scratch space shared between the two containers in a pod.
                    "volumes": [{"name": "shared-data", "emptyDir": {}}],
                    "containers": [
                        {
                            "name": "sqs-manager",
                            "image": dag_config.SIMULATION_MOCK_IMAGE,
                            "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                            "command": [
                                "python",
                                "/var/simulation-mock/simulation_mock/sqs_manager.py",
                            ],
                            "args": ["--url", "$(URL)", "--dir", "$(DIR)"],
                            "env": [
                                {
                                    # Resolved at task render time from the create-queue task's XCom.
                                    "name": "URL",
                                    "value": f"{{{{ ti.xcom_pull(task_ids='{create_queue_task_id}') }}}}",
                                },
                                {"name": "DIR", "value": "/shared-data"},
                                {"name": "DEBUG", "value": "true"},
                                {
                                    "name": "AWS_DEFAULT_REGION",
                                    "value": dag_config.REGION,
                                },
                                {
                                    "name": "AWS_ACCOUNT_ID",
                                    "value": dag_config.ACCOUNT_ID,
                                },
                            ],
                            "livenessProbe": {
                                "exec": {
                                    "command": ["cat", "/tmp/container.running"],
                                },
                                "initialDelaySeconds": 5,
                                "periodSeconds": 5,
                            },
                        },
                        {
                            "name": "simulator",
                            "image": dag_config.SIMULATION_MOCK_IMAGE,
                            "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                            "command": [
                                "python",
                                "/var/simulation-mock/simulation_mock/simulator.py",
                            ],
                            "args": [
                                "--dir",
                                "$(DIR)",
                                "--max-seconds",
                                "$(MAX_SECONDS)",
                                "--failure-seed",
                                "$(FAILURE_SEED)",
                            ],
                            "env": [
                                {"name": "DIR", "value": "/shared-data"},
                                {"name": "MAX_SECONDS", "value": "60"},
                                # probability of failure = 1/FAILURE_SEED if 1 <= FAILURE_SEED <= 32768 else 0
                                {"name": "FAILURE_SEED", "value": "32769"},
                                {"name": "DEBUG", "value": "true"},
                                {
                                    "name": "AWS_DEFAULT_REGION",
                                    "value": dag_config.REGION,
                                },
                                {
                                    "name": "AWS_ACCOUNT_ID",
                                    "value": dag_config.ACCOUNT_ID,
                                },
                            ],
                            "livenessProbe": {
                                "exec": {
                                    "command": ["cat", "/tmp/container.running"],
                                },
                                "initialDelaySeconds": 5,
                                "periodSeconds": 5,
                            },
                        },
                    ],
                }
            },
        },
    }
# Fine-grained fan-out: one SQS message per simulation; each of the
# `parallelism` pods keeps pulling messages until the queue drains.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=6),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    total_simulations = 50
    parallelism = 10
    create_queue_task = PythonOperator(
        task_id="create-queue",
        dag=dag,
        provide_context=True,
        python_callable=create_and_populate_queue,
        op_kwargs={"num_items": total_simulations},
    )
    job_task = EksJobOperator(
        task_id="fine-parallel-mock",
        dag=dag,
        namespace=dag_config.EKS_NAMESPACE,  # type: ignore
        body=get_job_body(
            create_queue_task_id=create_queue_task.task_id,
            parallelism=parallelism,
        ),
        delete_policy="IfSucceeded",
        cluster_name=dag_config.EKS_CLUSTER_NAME,  # type: ignore
        service_account_role_arn=dag_config.EKS_SERVICE_ACCOUNT_ROLE,  # type: ignore
        get_logs=False,
    )
    delete_queue_task = PythonOperator(
        task_id="delete_queue",
        dag=dag,
        provide_context=True,
        python_callable=delete_queue,
        op_kwargs={"create_queue_task_id": create_queue_task.task_id},
    )
    create_queue_task >> job_task >> delete_queue_task
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/coarse_parallel_mock.py | modules/simulations/k8s-managed/simulation_dags/coarse_parallel_mock.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import random
import string
from datetime import timedelta
from typing import Any, Dict, Iterator, List, TypeVar
import boto3
from airflow import DAG
from airflow.models.taskinstance import TaskInstance
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
from mypy_boto3_sqs.client import SQSClient
from simulation_dags import dag_config
from simulation_dags.eks_job_operator import EksJobOperator
# Generic element type for the `chunks` helper below.
ValueType = TypeVar("ValueType")

# DAG id mirrors this file's name (without the .py suffix).
DAG_ID = os.path.basename(__file__).replace(".py", "")

DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}

logger = logging.getLogger("airflow")
logger.setLevel("WARNING")
def get_client() -> SQSClient:
    """Return an SQS client backed by temporary credentials for the DAG role."""
    assumed = boto3.client("sts").assume_role(
        RoleArn=dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )
    creds = assumed["Credentials"]
    return Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    ).client("sqs")
def create_and_populate_queue(num_items: int) -> str:
    """Create a uniquely named SQS queue and enqueue ``num_items`` mock messages.

    Returns the queue URL (published to XCom by the PythonOperator caller).
    """
    client = get_client()
    response = client.create_queue(
        QueueName=(
            f"{dag_config.PROJECT_NAME}-{dag_config.DEPLOYMENT_NAME}-{dag_config.MODULE_NAME}-"
            f"{''.join(random.choices(string.ascii_letters + string.digits, k=6))}"
        )
    )
    queue_url = response["QueueUrl"]

    # Fix: the helper yields lists, so the return annotation is
    # Iterator[List[ValueType]] (was Iterator[ValueType]); the caller now passes
    # a real list to match the declared parameter type.
    def chunks(lst: List[ValueType], n: int) -> Iterator[List[ValueType]]:
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i : i + n]

    # SQS SendMessageBatch accepts at most 10 entries per call.
    for i, chunk in enumerate(chunks(list(range(num_items)), 10)):
        entries = [{"Id": str(j), "MessageBody": f"message-{i}-{j}-{value}"} for j, value in enumerate(chunk)]
        client.send_message_batch(QueueUrl=queue_url, Entries=entries)
    return queue_url
def delete_queue(ti: TaskInstance, create_queue_task_id: str) -> None:
    """Delete the SQS queue whose URL was published by the create-queue task."""
    queue_url = ti.xcom_pull(task_ids=create_queue_task_id)
    get_client().delete_queue(QueueUrl=queue_url)
def get_job_body(create_queue_task_id: str, parallelism: int, completions: int) -> Dict[str, Any]:
    """Build the Kubernetes Job manifest for the coarse-grained parallel mock.

    Runs ``completions`` pods, at most ``parallelism`` at a time; each pod
    consumes exactly one SQS message (``--single-message``) and then exits.
    The queue URL is injected via an Airflow template that pulls XCom from
    ``create_queue_task_id``.
    """
    # NOTE(review): "apiVerson" looks like a typo for "apiVersion" (batch/v1 Job)
    # — verify against how the job operator consumes this manifest.
    return {
        "apiVerson": "batch/v1",
        "kind": "Job",
        "metadata": {},
        "spec": {
            "parallelism": parallelism,
            "completions": completions,
            "backoffLimit": 6,
            "template": {
                "spec": {
                    "serviceAccountName": dag_config.MODULE_NAME,
                    "restartPolicy": "Never",
                    # Scratch space shared between the two containers in a pod.
                    "volumes": [{"name": "shared-data", "emptyDir": {}}],
                    "containers": [
                        {
                            "name": "sqs-manager",
                            "image": dag_config.SIMULATION_MOCK_IMAGE,
                            "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                            "command": [
                                "python",
                                "/var/simulation-mock/simulation_mock/sqs_manager.py",
                            ],
                            "args": [
                                "--url",
                                "$(URL)",
                                "--dir",
                                "$(DIR)",
                                "--single-message",
                            ],
                            "env": [
                                {
                                    # Resolved at task render time from the create-queue task's XCom.
                                    "name": "URL",
                                    "value": f"{{{{ ti.xcom_pull(task_ids='{create_queue_task_id}') }}}}",
                                },
                                {"name": "DIR", "value": "/shared-data"},
                                {"name": "DEBUG", "value": "true"},
                                {
                                    "name": "AWS_DEFAULT_REGION",
                                    "value": dag_config.REGION,
                                },
                                {
                                    "name": "AWS_ACCOUNT_ID",
                                    "value": dag_config.ACCOUNT_ID,
                                },
                            ],
                            "livenessProbe": {
                                "exec": {
                                    "command": ["cat", "/tmp/container.running"],
                                },
                                "initialDelaySeconds": 5,
                                "periodSeconds": 5,
                            },
                        },
                        {
                            "name": "simulator",
                            "image": dag_config.SIMULATION_MOCK_IMAGE,
                            "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                            "command": [
                                "python",
                                "/var/simulation-mock/simulation_mock/simulator.py",
                            ],
                            "args": [
                                "--dir",
                                "$(DIR)",
                                "--max-seconds",
                                "$(MAX_SECONDS)",
                                "--failure-seed",
                                "$(FAILURE_SEED)",
                            ],
                            "env": [
                                {"name": "DIR", "value": "/shared-data"},
                                {"name": "MAX_SECONDS", "value": "60"},
                                # probability of failure = 1/FAILURE_SEED if 1 <= FAILURE_SEED <= 32768 else 0
                                {"name": "FAILURE_SEED", "value": "32769"},
                                {"name": "DEBUG", "value": "true"},
                                {
                                    "name": "AWS_DEFAULT_REGION",
                                    "value": dag_config.REGION,
                                },
                                {
                                    "name": "AWS_ACCOUNT_ID",
                                    "value": dag_config.ACCOUNT_ID,
                                },
                            ],
                            "livenessProbe": {
                                "exec": {
                                    "command": ["cat", "/tmp/container.running"],
                                },
                                "initialDelaySeconds": 5,
                                "periodSeconds": 5,
                            },
                        },
                    ],
                }
            },
        },
    }
# Coarse-grained fan-out: the Job runs one pod per simulation (completions ==
# total_simulations), each consuming a single SQS message.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=6),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    total_simulations = 50
    parallelism = 10
    create_queue_task = PythonOperator(
        task_id="create-queue",
        dag=dag,
        provide_context=True,
        python_callable=create_and_populate_queue,
        op_kwargs={"num_items": total_simulations},
    )
    job_task = EksJobOperator(
        task_id="coarse-parallel-mock",
        dag=dag,
        namespace=dag_config.EKS_NAMESPACE,  # type: ignore
        body=get_job_body(
            create_queue_task_id=create_queue_task.task_id,
            parallelism=parallelism,
            completions=total_simulations,
        ),
        delete_policy="IfSucceeded",
        cluster_name=dag_config.EKS_CLUSTER_NAME,  # type: ignore
        service_account_role_arn=dag_config.EKS_SERVICE_ACCOUNT_ROLE,  # type: ignore
        get_logs=False,
    )
    delete_queue_task = PythonOperator(
        task_id="delete_queue",
        dag=dag,
        provide_context=True,
        python_callable=delete_queue,
        op_kwargs={"create_queue_task_id": create_queue_task.task_id},
    )
    create_queue_task >> job_task >> delete_queue_task
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/dag_config.py | modules/simulations/k8s-managed/simulation_dags/dag_config.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
# This file is populated with configuration information when the Module is deployed
# Configuration parameters are exported as module level constants
#
# Example:
# SOME_PARAMETER = 'some value'
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/eks_job_operator.py | modules/simulations/k8s-managed/simulation_dags/eks_job_operator.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import subprocess
import tempfile
from threading import Timer
from typing import Any
from airflow_kubernetes_job_operator import KubernetesJobOperator
# Module-level logging setup; reuses Airflow's task logger.
logging.basicConfig(level="DEBUG")
logger = logging.getLogger("airflow")
# Seconds between kubeconfig reloads for long-running jobs.
KUBECONFIG_REFRESH_RATE = 180
class EksJobOperator(KubernetesJobOperator):  # type: ignore
    """KubernetesJobOperator that targets an EKS cluster via a generated kubeconfig.

    Before execution it writes a kubeconfig with ``aws eks update-kubeconfig``
    (using ``service_account_role_arn``) and then periodically reloads it on a
    background timer so long-running jobs do not hit expired credentials.
    """

    def __init__(
        self,
        *args: Any,
        cluster_name: str,
        service_account_role_arn: str,
        **kwargs: Any,
    ) -> None:
        # Target EKS cluster name (passed to `aws eks update-kubeconfig --name`).
        self.cluster_name = cluster_name
        # IAM role ARN passed to `aws eks update-kubeconfig --role-arn`.
        self.service_account_role_arn = service_account_role_arn
        self.logger = logger
        super().__init__(*args, **kwargs)

    def update_kubeconfig(self, path: str) -> None:
        """Write a kubeconfig for the cluster to ``path`` via the AWS CLI."""
        args = [
            "aws",
            "eks",
            "update-kubeconfig",
            "--name",
            self.cluster_name,
            "--role-arn",
            self.service_account_role_arn,
            "--kubeconfig",
            path,
        ]
        logger.info("command: %s", args)
        subprocess.check_call(args)

    def load_kubeconfig(self) -> None:
        """Reload the kubeconfig into the job runner and re-arm the refresh timer."""
        logger.debug("Reloading kubeconfig")
        # Load the configuration.
        self.job_runner.client.load_kube_config(
            config_file=self.config_file,
            is_in_cluster=self.in_cluster,
            context=self.cluster_context,
        )
        # NOTE(review): the timer chain is never cancelled, so a refresh thread
        # can outlive the task — confirm this is intentional.
        Timer(KUBECONFIG_REFRESH_RATE, self.load_kubeconfig).start()

    def pre_execute(self, context: Any) -> Any:
        """Called before execution by the airflow system.
        Overriding this method without calling its super() will
        break the job operator.
        Arguments:
            context -- The airflow context
        """
        # NOTE(review): NamedTemporaryFile deletes the file on context exit; the
        # name is reused afterwards and the file is recreated by
        # `aws eks update-kubeconfig` — confirm the intended temp-file lifetime.
        with tempfile.NamedTemporaryFile() as temp:
            kubeconfig_path = temp.name
        self.update_kubeconfig(path=kubeconfig_path)
        self.config_file = kubeconfig_path
        self.in_cluster = False
        self.cluster_context = None
        # This background thread refreshes the kubeconfig to eliminate
        # timeouts for long running jobs
        Timer(KUBECONFIG_REFRESH_RATE, self.load_kubeconfig).start()
        # call parent.
        return super().pre_execute(context)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/coarse_fan_out_mock.py | modules/simulations/k8s-managed/simulation_dags/coarse_fan_out_mock.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
import random
import string
from datetime import timedelta
from typing import Any, Dict, Iterator, List, TypeVar
import boto3
from airflow import DAG
from airflow.exceptions import AirflowFailException
from airflow.models.taskinstance import TaskInstance
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
from mypy_boto3_sqs.client import SQSClient
from simulation_dags import dag_config
from simulation_dags.eks_job_operator import EksJobOperator
# Generic element type for the `chunks` helper below.
ValueType = TypeVar("ValueType")

# DAG id mirrors this file's name (without the .py suffix).
DAG_ID = os.path.basename(__file__).replace(".py", "")

DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}

logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
def get_client() -> SQSClient:
    """Return an SQS client backed by temporary credentials for the DAG role."""
    assumed = boto3.client("sts").assume_role(
        RoleArn=dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )
    creds = assumed["Credentials"]
    return Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    ).client("sqs")
def create_and_populate_queue(num_items: int) -> str:
    """Create a uniquely named SQS queue and enqueue ``num_items`` mock messages.

    Returns the queue URL (published to XCom by the PythonOperator caller).
    """
    client = get_client()
    response = client.create_queue(
        QueueName=(
            f"{dag_config.PROJECT_NAME}-{dag_config.DEPLOYMENT_NAME}-{dag_config.MODULE_NAME}-"
            f"{''.join(random.choices(string.ascii_letters + string.digits, k=6))}"
        )
    )
    queue_url = response["QueueUrl"]

    # Fix: the helper yields lists, so the return annotation is
    # Iterator[List[ValueType]] (was Iterator[ValueType]); the caller now passes
    # a real list to match the declared parameter type.
    def chunks(lst: List[ValueType], n: int) -> Iterator[List[ValueType]]:
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i : i + n]

    # SQS SendMessageBatch accepts at most 10 entries per call.
    for i, chunk in enumerate(chunks(list(range(num_items)), 10)):
        entries = [{"Id": str(j), "MessageBody": f"message-{i}-{j}-{value}"} for j, value in enumerate(chunk)]
        client.send_message_batch(QueueUrl=queue_url, Entries=entries)
    return queue_url
def fail_if_job_failed(ti: TaskInstance, eks_job_task_id: str) -> None:
    """Fail the DAG run unless the EKS job task reported JOB_COMPLETED."""
    status = ti.xcom_pull(task_ids=eks_job_task_id, key="status")
    print(f"eks_job_task_id: {eks_job_task_id} status: {status}")
    if status == "JOB_COMPLETED":
        return
    raise AirflowFailException("EKS Job Failed")
def delete_queue(ti: TaskInstance, create_queue_task_id: str) -> None:
    """Delete the SQS queue whose URL was published by the create-queue task."""
    queue_url = ti.xcom_pull(task_ids=create_queue_task_id)
    get_client().delete_queue(QueueUrl=queue_url)
def get_job_body(create_queue_task_id: str, parallelism: int, completions: int, max_failures: int) -> Dict[str, Any]:
    """Build the Kubernetes Job manifest for the coarse fan-out mock.

    The Job runs a single "pod-launcher" container (a kopf operator) that
    itself launches worker pods from ``WORKER_POD_BODY`` — a JSON-serialized
    Pod template embedded as an environment variable. Parallelism/completions/
    failure limits are delegated to the launcher via env vars.
    """
    # Worker Pod template, serialized so the launcher can create copies of it.
    worker_pod_body = json.dumps(
        {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {},
            "spec": {
                "serviceAccountName": dag_config.MODULE_NAME,
                "restartPolicy": "Never",
                "volumes": [{"name": "shared-data", "emptyDir": {}}],
                "containers": [
                    {
                        "name": "sqs-manager",
                        "image": dag_config.SIMULATION_MOCK_IMAGE,
                        "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                        "command": [
                            "python",
                            "/var/simulation-mock/simulation_mock/sqs_manager.py",
                        ],
                        "args": [
                            "--url",
                            "$(QUEUE_URL)",
                            "--dir",
                            "$(DIR)",
                            "--single-message",
                        ],
                        "env": [
                            {"name": "DIR", "value": "/shared-data"},
                            {"name": "DEBUG", "value": "true"},
                            {"name": "AWS_DEFAULT_REGION", "value": dag_config.REGION},
                            {"name": "AWS_ACCOUNT_ID", "value": dag_config.ACCOUNT_ID},
                        ],
                        "livenessProbe": {
                            "exec": {
                                "command": ["cat", "/tmp/container.running"],
                            },
                            "initialDelaySeconds": 5,
                            "periodSeconds": 5,
                        },
                    },
                    {
                        "name": "simulator",
                        "image": dag_config.SIMULATION_MOCK_IMAGE,
                        "volumeMounts": [{"name": "shared-data", "mountPath": "/shared-data"}],
                        "command": [
                            "python",
                            "/var/simulation-mock/simulation_mock/simulator.py",
                        ],
                        "args": [
                            "--dir",
                            "$(DIR)",
                            "--max-seconds",
                            "$(MAX_SECONDS)",
                            "--failure-seed",
                            "$(FAILURE_SEED)",
                        ],
                        "env": [
                            {"name": "DIR", "value": "/shared-data"},
                            {"name": "MAX_SECONDS", "value": "60"},
                            # probability of failure = 1/FAILURE_SEED if 1 <= FAILURE_SEED <= 32768 else 0
                            {"name": "FAILURE_SEED", "value": "32769"},
                            {"name": "DEBUG", "value": "true"},
                            {"name": "AWS_DEFAULT_REGION", "value": dag_config.REGION},
                            {"name": "AWS_ACCOUNT_ID", "value": dag_config.ACCOUNT_ID},
                        ],
                        "livenessProbe": {
                            "exec": {
                                "command": ["cat", "/tmp/container.running"],
                            },
                            "initialDelaySeconds": 5,
                            "periodSeconds": 5,
                        },
                    },
                ],
            },
        }
    )
    # NOTE(review): "apiVerson" looks like a typo for "apiVersion" (batch/v1 Job)
    # — verify against how the job operator consumes this manifest.
    return {
        "apiVerson": "batch/v1",
        "kind": "Job",
        "metadata": {
            "labels": {"app": "addf-job"},
        },
        "spec": {
            # Only the launcher pod itself; worker fan-out happens inside it.
            "parallelism": 1,
            "completions": 1,
            "backoffLimit": 6,
            "template": {
                "metadata": {
                    "labels": {"app": "addf-job"},
                },
                "spec": {
                    "serviceAccountName": dag_config.MODULE_NAME,
                    "restartPolicy": "Never",
                    "priorityClassName": "system-node-critical",
                    "containers": [
                        {
                            "name": "pod-launcher",
                            "image": dag_config.SIMULATION_MOCK_IMAGE,
                            "command": ["kopf", "run"],
                            "args": [
                                "simulation_mock/pod_launcher.py",
                                "--namespace",
                                dag_config.EKS_NAMESPACE,
                                # "--verbose"
                            ],
                            "resources": {
                                "requests": {"memory": "512Mi", "cpu": "1"},
                                "limits": {"memory": "1Gi", "cpu": "1"},
                            },
                            "env": [
                                {
                                    "name": "NAMESPACE",
                                    "value": dag_config.EKS_NAMESPACE,
                                },
                                # Downward-API fields so the launcher can set
                                # owner references on the pods it creates.
                                {
                                    "name": "POD_LAUNCHER_NAME",
                                    "valueFrom": {"fieldRef": {"fieldPath": "metadata.name"}},
                                },
                                {
                                    "name": "POD_LAUNCHER_UID",
                                    "valueFrom": {"fieldRef": {"fieldPath": "metadata.uid"}},
                                },
                                {
                                    "name": "JOB_NAME",
                                    "valueFrom": {"fieldRef": {"fieldPath": "metadata.labels['job-name']"}},
                                },
                                {
                                    "name": "JOB_UID",
                                    "valueFrom": {"fieldRef": {"fieldPath": "metadata.labels['controller-uid']"}},
                                },
                                {"name": "PARALLELISM", "value": f"{parallelism}"},
                                {"name": "COMPLETIONS", "value": f"{completions}"},
                                {"name": "MAX_FAILURES", "value": f"{max_failures}"},
                                {
                                    "name": "AWS_DEFAULT_REGION",
                                    "value": dag_config.REGION,
                                },
                                {
                                    "name": "AWS_ACCOUNT_ID",
                                    "value": dag_config.ACCOUNT_ID,
                                },
                                {
                                    # Resolved at task render time from the create-queue task's XCom.
                                    "name": "QUEUE_URL",
                                    "value": f"{{{{ ti.xcom_pull(task_ids='{create_queue_task_id}') }}}}",
                                },
                                {"name": "WORKER_POD_BODY", "value": worker_pod_body},
                            ],
                        },
                    ],
                },
            },
        },
    }
# Coarse fan-out via an in-cluster kopf launcher: the Job is kept after the run
# (delete_policy="Never") so its status XCom can be inspected, and an explicit
# check task fails the DAG if the launcher did not report JOB_COMPLETED.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=6),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    total_simulations = 50
    parallelism = 10
    max_failures = 5
    create_queue_task = PythonOperator(
        task_id="create-queue",
        dag=dag,
        provide_context=True,
        python_callable=create_and_populate_queue,
        op_kwargs={"num_items": total_simulations},
    )
    job_task = EksJobOperator(
        task_id="coarse-fan-out-mock",
        dag=dag,
        namespace=dag_config.EKS_NAMESPACE,  # type: ignore
        body=get_job_body(
            create_queue_task_id=create_queue_task.task_id,
            parallelism=parallelism,
            completions=total_simulations,
            max_failures=max_failures,
        ),
        delete_policy="Never",
        cluster_name=dag_config.EKS_CLUSTER_NAME,  # type: ignore
        service_account_role_arn=dag_config.EKS_SERVICE_ACCOUNT_ROLE,  # type: ignore
        get_logs=True,
    )
    fail_if_job_failed_task = PythonOperator(
        task_id="fail-if-job-failed",
        dag=dag,
        provide_context=True,
        python_callable=fail_if_job_failed,
        op_kwargs={"eks_job_task_id": job_task.task_id},
    )
    delete_queue_task = PythonOperator(
        task_id="delete-queue",
        dag=dag,
        provide_context=True,
        python_callable=delete_queue,
        op_kwargs={"create_queue_task_id": create_queue_task.task_id},
    )
    create_queue_task >> job_task >> fail_if_job_failed_task >> delete_queue_task
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/utils.py | modules/simulations/k8s-managed/simulation_dags/utils.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
from boto3.session import Session
def get_assumed_role_session(role_arn: str) -> Session:
    """Assume ``role_arn`` via STS and return a boto3 Session on its credentials."""
    creds = boto3.client("sts").assume_role(
        RoleArn=role_arn,
        RoleSessionName="AssumeRoleSession1",
    )["Credentials"]
    return Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/__init__.py | modules/simulations/k8s-managed/simulation_dags/__init__.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/simulation_dags/simple_mock.py | modules/simulations/k8s-managed/simulation_dags/simple_mock.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import textwrap
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from simulation_dags import dag_config
from simulation_dags.eks_job_operator import EksJobOperator
# DAG id mirrors this file's name (without the .py suffix).
DAG_ID = os.path.basename(__file__).replace(".py", "")

DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}

logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
# Simplest mock: a bash-only Kubernetes Job that sleeps a random number of
# seconds and fails with probability 1/FAILURE_SEED per pod.
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    # caller_identity = PythonOperator(task_id="log_caller_identity", dag=dag, python_callable=log_caller_identity)
    envs = {"JOB_NAME": "Simple Mock", "MAX_SECONDS": "120", "FAILURE_SEED": "20"}
    default_delete_policy = "IfSucceeded"
    # NOTE(review): "apiVerson" looks like a typo for "apiVersion" (batch/v1 Job)
    # — verify against how the job operator consumes this manifest.
    body = {
        "apiVerson": "batch/v1",
        "kind": "Job",
        "metadata": {},
        "spec": {
            "parallelism": 10,
            "completions": 50,
            "backoffLimit": 6,
            "template": {
                "spec": {
                    "restartPolicy": "OnFailure",
                    "containers": [
                        {
                            "name": "job-executor",
                            "image": "ubuntu",
                            "command": [
                                "bash",
                                "-c",
                                textwrap.dedent(
                                    """\
                                    #/usr/bin/env bash
                                    echo "[$(date)] Starting $JOB_NAME"
                                    TIC_COUNT=$RANDOM
                                    let "TIC_COUNT %= $MAX_SECONDS"
                                    FAILURE_CHECK=$RANDOM
                                    let "FAILURE_CHECK %= $FAILURE_SEED"
                                    echo "[$(date)] Random Runtime: $TIC_COUNT"
                                    echo "[$(date)] Random Failure: $FAILURE_CHECK"
                                    CURRENT_COUNT=0
                                    while true; do
                                        CURRENT_COUNT=$((CURRENT_COUNT + 1))
                                        if [ "$CURRENT_COUNT" -ge "$TIC_COUNT" ]; then
                                            break
                                        fi
                                        sleep 1
                                    done
                                    if [ "$FAILURE_CHECK" -eq "0" ]; then
                                        echo "[$(date)] Failure" && exit 1
                                    else
                                        echo "[$(date)] Complete" && exit 0
                                    fi
                                    """
                                ),
                            ],
                        }
                    ],
                }
            },
        },
    }
    job_task = EksJobOperator(
        task_id="bash-mock-job",
        dag=dag,
        namespace=dag_config.EKS_NAMESPACE,  # type: ignore
        envs=envs,
        body=body,
        delete_policy=default_delete_policy,
        cluster_name=dag_config.EKS_CLUSTER_NAME,  # type: ignore
        service_account_role_arn=dag_config.EKS_SERVICE_ACCOUNT_ROLE,  # type: ignore
    )
    job_task
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/setup.py | modules/simulations/k8s-managed/images/simulation-mock/src/setup.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
# Package definition for the simulation-mock helper image.
# The package bundles the SQS message manager and the mock simulator entry
# points; runtime dependencies are pinned to the versions the image was
# validated against (boto3 / kubernetes client / kopf operator framework).
setup(
    name="simulation-mock",
    version="0.1.0",
    author="AWS Professional Services",
    author_email="aws-proserve-opensource@amazon.com",
    project_urls={"Org Site": "https://aws.amazon.com/professional-services/"},
    # Ship only the simulation_mock package (and its subpackages).
    packages=find_packages(include=["simulation_mock", "simulation_mock.*"]),
    python_requires=">=3.7, <3.14",
    install_requires=[
        "boto3~=1.21.19",
        "platonic-sqs==1.0.1",
        "kubernetes~=21.7.0",
        "kopf~=1.35.4",
    ],
    include_package_data=True,
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/pod_launcher.py | modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/pod_launcher.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import subprocess
from typing import Any, Dict
import kopf
from kubernetes import config as k8_config
from kubernetes import dynamic
from kubernetes.client import api_client
# Identity of this launcher and its owning Job, injected via the pod spec.
# These are read once at import time; the kopf handlers below filter events
# against them.
NAMESPACE = os.environ.get("NAMESPACE")
POD_LAUNCHER_NAME = os.environ.get("POD_LAUNCHER_NAME")
POD_LAUNCHER_UID = os.environ.get("POD_LAUNCHER_UID")
JOB_NAME = os.environ.get("JOB_NAME")
JOB_UID = os.environ.get("JOB_UID")

# Mutable module state: the launcher pod's own body (captured by
# update_job_metadata) and counters maintained by the worker_pods_idx index.
POD_LAUNCHER_BODY = None
COMPLETIONS = 0
FAILURES = 0
def load_config(in_cluster: bool = True) -> None:
    """Load Kubernetes client configuration.

    The IN_CLUSTER_DEPLOYMENT environment variable, when set, overrides the
    *in_cluster* argument: any of "yes"/"true"/"1" (case-insensitive) selects
    the in-cluster config, anything else selects the local kubeconfig.
    """
    override = os.environ.get("IN_CLUSTER_DEPLOYMENT", None)
    if override is not None:
        in_cluster = override.lower() in ["yes", "true", "1"]
    if in_cluster:
        k8_config.load_incluster_config()
    else:
        k8_config.load_kube_config()
def dynamic_client() -> dynamic.DynamicClient:
    """Return a Kubernetes dynamic client, loading cluster credentials first."""
    load_config()
    return dynamic.DynamicClient(client=api_client.ApiClient())
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, memo: kopf.Memo, logger: kopf.Logger, **_: Any) -> None:
    """Kopf startup hook: configure operator persistence and load run settings.

    Progress is stored both in annotations and in the resource status (under
    the simulation-pod-launcher.addf.aws prefix), and a matching finalizer is
    registered. All run parameters are read from environment variables into
    the kopf memo so downstream handlers can access them.
    """
    settings.persistence.progress_storage = kopf.MultiProgressStorage(
        [
            kopf.AnnotationsProgressStorage(prefix="simulation-pod-launcher.addf.aws"),
            kopf.StatusProgressStorage(field="simulation-pod-launcher.addf.aws"),
        ]
    )
    settings.persistence.finalizer = "simulation-pod-launcher.addf.aws/kopf-finalizer"
    # Event posting level defaults to INFO unless EVENT_LOG_LEVEL overrides it.
    settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO"))
    memo.namespace = os.environ.get("NAMESPACE")
    memo.pod_launcher_name = os.environ.get("POD_LAUNCHER_NAME")
    memo.pod_launcher_uid = os.environ.get("POD_LAUNCHER_UID")
    memo.worker_body = os.environ.get("WORKER_POD_BODY")
    memo.queue_url = os.environ.get("QUEUE_URL")
    memo.default_region = os.environ.get("AWS_DEFAULT_REGION")
    memo.account_id = os.environ.get("AWS_ACCOUNT_ID")
    # NOTE(review): PARALLELISM / COMPLETIONS / MAX_FAILURES are required —
    # int(None) raises TypeError if any of them is unset; confirm the pod spec
    # always injects them.
    memo.parallelism = int(os.environ.get("PARALLELISM"))
    memo.completions = int(os.environ.get("COMPLETIONS"))
    memo.max_failures = int(os.environ.get("MAX_FAILURES"))
    logger.info("memo.namespace: %s", memo.namespace)
    logger.info("memo.pod_launcher_name: %s", memo.pod_launcher_name)
    logger.info("memo.pod_launcher_uid: %s", memo.pod_launcher_uid)
    logger.info("memo.worker_body: %s", memo.worker_body)
    logger.info("memo.queue_url: %s", memo.queue_url)
    logger.info("memo.default_region: %s", memo.default_region)
    logger.info("memo.account_id: %s", memo.account_id)
    logger.info("memo.parallelism: %s", memo.parallelism)
    logger.info("JOB_NAME: %s", JOB_NAME)
    logger.info("JOB_UID: %s", JOB_UID)
def _should_index_pod(meta: kopf.Meta, **_: Any) -> bool:
    """Select pods whose first owner reference is this pod launcher."""
    if "ownerReferences" not in meta:
        return False
    owners = meta["ownerReferences"]
    return len(owners) > 0 and owners[0]["uid"] == POD_LAUNCHER_UID
@kopf.index("pods", when=_should_index_pod)
def worker_pods_idx(name: str, status: kopf.Status, **_: Any) -> Dict[str, str]:
    """Index worker pods by lifecycle bucket: Running, Completed, or Failed.

    Any non-terminal phase (Pending, Running, Unknown, ...) is bucketed as
    "Running"; terminal phases bump the module-level COMPLETIONS / FAILURES
    counters consumed by monitor_job.

    Fixes: the original shadowed the kopf ``status`` mapping with its own
    ``.get("phase")`` result and carried an unreachable final ``else`` branch
    (the three branches above it are exhaustive); both are cleaned up here.

    NOTE(review): kopf may re-invoke this handler for repeated events on the
    same pod, which would increment a counter more than once per pod — confirm
    this is acceptable for the mock workload.
    """
    phase = status.get("phase")  # do not shadow the kopf Status mapping
    if phase not in ["Succeeded", "Failed"]:
        return {"Running": name}
    elif phase == "Succeeded":
        global COMPLETIONS
        COMPLETIONS += 1
        return {"Completed": name}
    else:  # phase == "Failed" — the prior branches are exhaustive
        global FAILURES
        FAILURES += 1
        return {"Failed": name}
def _should_update_job_metadata(meta: kopf.Meta, **_: Any) -> bool:
    """Match only the launcher pod itself, identified by its UID."""
    return POD_LAUNCHER_UID == meta["uid"]
@kopf.on.resume("pods", when=_should_update_job_metadata)
@kopf.on.create("pods", when=_should_update_job_metadata)
def update_job_metadata(meta: kopf.Meta, body: kopf.Body, memo: kopf.Memo, logger: kopf.Logger, **_: Any) -> str:
    """Capture the launcher pod's own body into module state.

    monitor_job later uses POD_LAUNCHER_BODY as the owner for the worker pods
    it spawns (owner references, naming, namespace).
    """
    global POD_LAUNCHER_BODY
    POD_LAUNCHER_BODY = body
    logger.debug("POD_LAUNCHER_BODY: %s", POD_LAUNCHER_BODY)
    return "METADATA_UPDATED"
def _should_monitor_job(meta: kopf.Meta, **_: Any) -> bool:
return meta["uid"] == JOB_UID
@kopf.daemon("batch", "v1", "jobs", when=_should_monitor_job, initial_delay=30)
def monitor_job(
name: str,
patch: kopf.Patch,
stopped: kopf.DaemonStopped,
memo: kopf.Memo,
logger: kopf.Logger,
worker_pods_idx: kopf.Index,
**_: Any,
) -> str:
logger.info("JOB: %s", name)
worker_spec = json.loads(memo.worker_body)
for container in worker_spec["spec"]["containers"]:
env = container["env"] if "env" in container else []
env.extend(
[
{"name": "NAMESPACE", "value": memo.namespace},
{"name": "JOB_NAME", "value": JOB_NAME},
{"name": "JOB_UID", "value": JOB_UID},
{"name": "POD_LAUNCHER_NAME", "value": memo.pod_launcher_name},
{"name": "AWS_DEFAULT_REGION", "value": memo.default_region},
{"name": "AWS_ACCOUNT_ID", "value": memo.account_id},
{"name": "QUEUE_URL", "value": memo.queue_url},
]
)
kopf.append_owner_reference(objs=[worker_spec], owner=POD_LAUNCHER_BODY)
kopf.harmonize_naming(objs=[worker_spec], name=POD_LAUNCHER_BODY["metadata"]["name"], forced=True)
kopf.adjust_namespace(objs=[worker_spec], namespace=NAMESPACE, forced=True)
logger.debug("WORKER_BODY: %s", worker_spec)
running_pod_count = 0
while not stopped and COMPLETIONS < memo.completions and FAILURES < memo.max_failures:
running_pod_count = len(list(worker_pods_idx.get("Running"))) if "Running" in worker_pods_idx else 0
launch_count = min(
memo.parallelism - running_pod_count,
memo.completions - (COMPLETIONS + running_pod_count),
)
logger.info(
"RUNNING_POD_COUNT: %s COMPLETIONS: %s FAILURES: %s LAUNCHING: %s",
running_pod_count,
COMPLETIONS,
FAILURES,
launch_count,
)
client = dynamic_client()
api = client.resources.get(kind="Pod")
counter = 0
for _ in range(0, launch_count):
counter += 1
try:
api.create(namespace=NAMESPACE, body=worker_spec)
except Exception as e:
logger.exception("LAUNCH_ERROR", e)
if counter % 10 == 0:
stopped.wait(0.5)
stopped.wait(10)
if COMPLETIONS < memo.completions and FAILURES >= memo.max_failures:
logger.info("FAILED")
patch["metadata"] = {
"annotations": {
"simulation-pod-launcher.addf.aws/status": "Failed",
"simulation-pod-launcher.addf.aws/completions": f"{COMPLETIONS}",
"simulation-pod-launcher.addf.aws/failures": f"{FAILURES}",
}
}
patch["status"] = {"phase": "Failed"}
status = "JOB_FAILED"
else:
logger.info("COMPLETE")
patch["metadata"] = {
"annotations": {
"simulation-pod-launcher.addf.aws/status": "Succeeded",
"simulation-pod-launcher.addf.aws/completions": f"{COMPLETIONS}",
"simulation-pod-launcher.addf.aws/failures": f"{FAILURES}",
}
}
status = "JOB_COMPLETED"
json_status = json.dumps({"status": status})
logger.info("WILL_EXIT_IN_30")
# Write status out so that it can be picked up by the XCom Value Parser
# And fork a subprocess to send a SIGTERM and initiate graceful shutdown
subprocess.Popen(
f"sleep 30; echo '::kube_api:xcom={json_status}'; echo 'Sending SIGTERM'; kill -15 {os.getpid()}",
shell=True,
)
return status
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/signal_handler.py | modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/signal_handler.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import sys
from threading import Event
class SignalHandler:
    """Translate POSIX signals into a cooperative shutdown flag.

    SIGINT and SIGTERM set ``kill_now`` and wake any waiter blocked on the
    shared Event; SIGABRT exits the process immediately with status 1.
    """

    kill_now = False

    def __init__(self, exit: Event):
        self.exit = exit
        for graceful in (signal.SIGINT, signal.SIGTERM):
            signal.signal(graceful, self.exit_gracefully)
        signal.signal(signal.SIGABRT, self.exit_ungracefully)

    def exit_gracefully(self, signum, frame):
        # Flip the flag and release anyone sleeping on the event.
        self.kill_now = True
        self.exit.set()

    def exit_ungracefully(self, signum, frame):
        # Hard abort: no cleanup, nonzero exit status.
        sys.exit(1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/__init__.py | modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/__init__.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d][%(levelname)s][%(threadName)s] %(message)s"
def get_logger() -> logging.Logger:
    """Return a module logger configured from the DEBUG environment variable.

    DEBUG set to "true"/"yes"/"1" (case-insensitive) selects DEBUG level;
    anything else selects INFO. In debug mode the chatty AWS/HTTP libraries
    are clamped to ERROR so application output stays readable.
    """
    debug = os.environ.get("DEBUG", "False").lower() in ["true", "yes", "1"]
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=level, format=DEBUG_LOGGING_FORMAT)
    logger: logging.Logger = logging.getLogger(__name__)
    logger.setLevel(level)
    if debug:
        for noisy in ("boto3", "botocore", "urllib3"):
            logging.getLogger(noisy).setLevel(logging.ERROR)
    return logger
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/sqs_manager.py | modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/sqs_manager.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from contextlib import contextmanager
from datetime import timedelta
from threading import Event
from typing import Iterator
from platonic.queue.errors import MessageReceiveTimeout
from platonic.sqs.queue import SQSReceiver
from platonic.sqs.queue.message import SQSMessage
from platonic.sqs.queue.types import ValueType
from platonic.timeout import ConstantTimeout
from simulation_mock import get_logger
from simulation_mock.signal_handler import SignalHandler
# How long a single receive() waits for a message before raising a timeout.
MESSAGE_RECEIVE_TIMEOUT_SECONDS = 10
# Upper bound on how long the simulator may take to consume one data file.
PROCESSING_TIMEOUT_MINUTES = 5
# Poll interval while waiting for the simulator to delete the data file.
FILE_CHECK_SLEEP_TIME_SECONDS = 10
# Name of the message payload file shared with the simulator container.
DATA_FILE_NAME = "message.json"
LOGGER = get_logger()
class ProcessingTimeoutException(Exception):
    """Raised when a received message is not processed within the timeout."""
class SQSHeartbeatReceiver(SQSReceiver[ValueType]):
    """SQSReceiver extended with visibility-timeout heartbeats and
    failure-aware acknowledgement semantics."""

    def heartbeat(self, message: SQSMessage[ValueType], seconds: int) -> None:
        """Extend the visibility timeout of Messages being processed

        Parameters
        ----------
        message : SQSMessage[ValueType]
            The SQS Message to extend
        seconds : int
            Number of seconds to extend the timeout
        """
        self.client.change_message_visibility(
            QueueUrl=self.url,
            ReceiptHandle=message.receipt_handle,
            VisibilityTimeout=seconds,
        )

    @contextmanager
    def acknowledgement(self, message: SQSMessage[ValueType]) -> Iterator[SQSMessage[ValueType]]:
        """Override of ``acknowledgement`` that won't acknowledge (delete) the message if an Exception is thrown

        Parameters
        ----------
        message : SQSMessage[ValueType]
            The SQS Message to conditionally acknowledge

        Yields
        ------
        SQSMessage[ValueType]
            The SQS Message

        Raises
        ------
        e
            Any exception that was thrown
        """
        try:  # noqa: WPS501
            yield message
        except Exception as e:
            # NAK path: leave the message on the queue (it becomes visible
            # again after its timeout) and re-raise for the caller.
            LOGGER.debug("NAK: %s - %s", message, e)
            raise e
        else:
            # Success path: delete the message from the queue.
            LOGGER.debug("ACK: %s", message)
            self.acknowledge(message)
def main(url: str, dir: str, single_message: bool) -> int:
    """Pump SQS messages to the simulator via a shared data file.

    Each received message is written to ``<dir>/message.json``; the simulator
    signals completion by deleting that file. While waiting, the message's SQS
    visibility timeout is extended (heartbeat) so it is not redelivered. The
    message is acknowledged (deleted) only when processing succeeds.

    Returns a process exit code: 0 on queue-empty / graceful shutdown /
    single-message success, 1 on single-message processing timeout.
    """
    LOGGER.info("QUEUE: %s", url)
    LOGGER.info("DIR: %s", dir)
    LOGGER.info("SINGLE_MESSAGE: %s", single_message)

    incoming_simulations = SQSHeartbeatReceiver[str](
        url=url,
        timeout=ConstantTimeout(period=timedelta(seconds=MESSAGE_RECEIVE_TIMEOUT_SECONDS)),
    )

    signal_handler = SignalHandler(exit=Event())
    while True:
        # try/except/ELSE: the `else` below runs only when the `with` block
        # completed without raising (i.e. the message was processed and ACKed).
        try:
            if signal_handler.kill_now:
                return 0
            with incoming_simulations.acknowledgement(incoming_simulations.receive()) as message:
                # NOTE(review): literal "message.json" duplicates the
                # DATA_FILE_NAME module constant — consider using the constant.
                data_file = os.path.join(dir, "message.json")
                LOGGER.info("MSG: %s", message)
                LOGGER.info("DATA_FILE: %s", data_file)
                with open(data_file, "w") as file:
                    file.write(message.value)

                timeout = ConstantTimeout(timedelta(minutes=PROCESSING_TIMEOUT_MINUTES))
                with timeout.timer() as timer:
                    # while/ELSE: the `else` raises only if the timer expired
                    # without the simulator deleting the data file.
                    while not timer.is_expired:
                        if signal_handler.kill_now:
                            return 0
                        if os.path.isfile(data_file):
                            LOGGER.debug("EXISTS: %s", data_file)
                            # Keep the in-flight message invisible to other consumers.
                            incoming_simulations.heartbeat(message=message, seconds=(60))
                            signal_handler.exit.wait(FILE_CHECK_SLEEP_TIME_SECONDS)
                        else:
                            LOGGER.debug("GONE: %s", data_file)
                            break
                    else:
                        raise ProcessingTimeoutException("TIMEOUT")
        except MessageReceiveTimeout:
            # Queue drained: normal exit.
            LOGGER.info("EMPTY: %s", url)
            return 0
        except ProcessingTimeoutException:
            LOGGER.error("TIMEOUT: %s", PROCESSING_TIMEOUT_MINUTES)
            if single_message:
                return 1
        else:
            LOGGER.info("SUCCESS: %s", message)
            if single_message:
                return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Manage SQS Messages")
parser.add_argument("--url", required=True, help="URL of the SQS Queue to manage")
parser.add_argument("--dir", required=True, help="Directory to use/monitor for data files")
parser.add_argument(
"--single-message",
action="store_true",
help="Whether to retrieve/process a single message or process messages until the queue is empty",
)
args = parser.parse_args()
LOGGER.debug("ARGS: %s", args)
with open("/tmp/container.running", "w") as file:
LOGGER.info("LIVENESS_FILE: /tmp/container.running")
sys.exit(main(url=args.url, dir=args.dir, single_message=args.single_message))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/simulator.py | modules/simulations/k8s-managed/images/simulation-mock/src/simulation_mock/simulator.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from datetime import timedelta
from random import randint
from threading import Event
from platonic.timeout import ConstantTimeout
from simulation_mock import get_logger
from simulation_mock.signal_handler import SignalHandler
# How long to wait for a new data file to appear before exiting cleanly.
FILE_CHECK_TIMEOUT_SECONDS = 30
# Name of the message payload file shared with the SQS manager container.
DATA_FILE_NAME = "message.json"
# Poll interval while waiting for a data file to appear.
FILE_CHECK_SLEEP_TIME_SECONDS = 10
LOGGER = get_logger()
class RandomFailureException(Exception):
    """Raised when the mock simulation randomly decides to fail."""
class FileCheckTimoutException(Exception):
    """Raised when no data file appears within the check timeout.

    (Name keeps the historical "Timout" spelling for caller compatibility.)
    """
def main(dir: str, max_seconds: int, failure_seed: int) -> int:
    """Mock simulation loop driven by a shared data file.

    Waits for ``<dir>/message.json`` to appear, "works" for a random number
    of seconds (up to *max_seconds*), randomly fails roughly 1-in-
    *failure_seed* times, and otherwise deletes the data file to signal
    completion back to the SQS manager sidecar.

    Returns a process exit code: 0 on graceful shutdown or when no new file
    appears within the check timeout, 1 on a simulated random failure.
    """
    LOGGER.info("DIR: %s", dir)
    LOGGER.info("MAX_SECONDS: %s", max_seconds)
    LOGGER.info("FAILURE_SEED: %s", failure_seed)

    signal_handler = SignalHandler(exit=Event())
    while True:
        try:
            if signal_handler.kill_now:
                return 0
            timeout = ConstantTimeout(timedelta(seconds=FILE_CHECK_TIMEOUT_SECONDS))
            with timeout.timer() as timer:
                # NOTE(review): literal "message.json" duplicates the
                # DATA_FILE_NAME module constant — consider using the constant.
                data_file = os.path.join(dir, "message.json")
                LOGGER.info("DATA_FILE: %s", data_file)
                # while/ELSE: the `else` raises only if the timer expired
                # without a data file ever appearing.
                while not timer.is_expired:
                    if signal_handler.kill_now:
                        return 0
                    if os.path.isfile(data_file):
                        LOGGER.info("EXISTS: %s", data_file)
                        # Simulate work for a random duration (interruptible).
                        sleep_time = randint(0, max_seconds)
                        LOGGER.info("RANDOM_SLEEP: %s", sleep_time)
                        signal_handler.exit.wait(sleep_time)
                        # ~1/failure_seed chance of a simulated failure.
                        if (randint(1, 32768) % failure_seed) == 0:
                            raise RandomFailureException("RANDOM")
                        else:
                            # Deleting the file signals success to the manager.
                            LOGGER.info("REMOVING: %s", data_file)
                            os.remove(data_file)
                            break
                    else:
                        LOGGER.debug("NOT_EXISTS: %s", data_file)
                        signal_handler.exit.wait(FILE_CHECK_SLEEP_TIME_SECONDS)
                else:
                    raise FileCheckTimoutException("TIMEOUT")
        except RandomFailureException:
            LOGGER.error("RANDOM_FAILURE")
            return 1
        except FileCheckTimoutException:
            # No more work arriving: normal exit.
            LOGGER.info("EXISTS_TIMEOUT: %s", data_file)
            return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Mock simulation")
parser.add_argument("--dir", required=True, help="Directory to use/monitor for data files")
parser.add_argument(
"--max-seconds",
required=True,
type=int,
help="Max runtime to execute mock simulation",
)
parser.add_argument(
"--failure-seed",
required=True,
type=int,
help="Seed number to determine random failures",
)
args = parser.parse_args()
LOGGER.debug("ARGS: %s", args)
with open("/tmp/container.running", "w") as file:
LOGGER.info("LIVENESS_FILE: /tmp/container.running")
sys.exit(main(dir=args.dir, max_seconds=args.max_seconds, failure_seed=args.failure_seed))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/stack.py | modules/simulations/batch-managed/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, Dict, List, cast
import aws_cdk.aws_batch_alpha as batch
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class BatchDags(Stack):
    """CDK stack providing AWS Batch infrastructure for MWAA-driven simulations.

    Creates: an IAM role assumable by the MWAA execution role (for DAG tasks
    to manage SQS/ECR/Batch resources), an EC2 instance role/profile for Batch
    container instances, a security group, and — driven by the batch-compute
    manifest — on-demand, spot, and/or Fargate compute environments with one
    job queue per compute type that was configured.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        mwaa_exec_role: str,
        vpc_id: str,
        private_subnet_ids: List[str],
        batch_compute: Dict[str, Any],
        **kwargs: Any,
    ) -> None:
        """Build the stack.

        Parameters
        ----------
        project_name / deployment_name / module_name : str
            Seed-Farmer coordinates; combined into the ``dep_mod`` resource prefix.
        mwaa_exec_role : str
            ARN of the MWAA execution role allowed to assume the DAG role.
        vpc_id : str
            VPC in which Batch compute runs (resolved via CDK context lookup).
        private_subnet_ids : List[str]
            Subnets for the compute environments.
        batch_compute : Dict[str, Any]
            Manifest dict; its "batch_compute_config" list drives which
            compute environments and queues are created.
        """
        # Env vars
        self.deployment_name = deployment_name
        self.module_name = module_name
        self.mwaa_exec_role = mwaa_exec_role

        super().__init__(
            scope,
            id,
            description="(SO9154) Autonomous Driving Data Framework (ADDF) - batch-managed",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment_name}")

        # Shared naming prefix for all resources in this module.
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"

        self.vpc_id = vpc_id
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=vpc_id,
        )

        self.private_subnets = []
        for idx, subnet_id in enumerate(private_subnet_ids):
            self.private_subnets.append(ec2.Subnet.from_subnet_id(scope=self, id=f"subnet{idx}", subnet_id=subnet_id))

        # Create Dag IAM Role and policy: grants DAG tasks access to
        # module-prefixed SQS queues and ECR repos, Batch job lifecycle
        # operations, PassRole for addf* roles, and read-only Batch describes.
        policy_statements = [
            iam.PolicyStatement(
                actions=["sqs:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:sqs:{self.region}:{self.account}:{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "batch:UntagResource",
                    "batch:DeregisterJobDefinition",
                    "batch:TerminateJob",
                    "batch:CancelJob",
                    "batch:SubmitJob",
                    "batch:RegisterJobDefinition",
                    "batch:TagResource",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:batch:{self.region}:{self.account}:job-queue/addf*",
                    f"arn:{self.partition}:batch:{self.region}:{self.account}:job-definition/*",
                    f"arn:{self.partition}:batch:{self.region}:{self.account}:job/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "iam:PassRole",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:iam::{self.account}:role/addf*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "batch:Describe*",
                    "batch:List*",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    "*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)

        batch_role_name = f"{dep_mod}-dag-role"
        # Assumable by both the MWAA execution role and ECS tasks.
        self.dag_role = iam.Role(
            self,
            f"dag-role-{dep_mod}",
            assumed_by=iam.CompositePrincipal(
                iam.ArnPrincipal(self.mwaa_exec_role),
                iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy")
            ],
            role_name=batch_role_name,
            path="/",
        )

        # Instance role/profile for EC2-backed Batch compute environments.
        ec2Role = iam.Role(
            self,
            "BatchEC2Role",
            assumed_by=iam.CompositePrincipal(iam.ServicePrincipal("ec2.amazonaws.com")),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role"),
            ],
        )

        ec2IAMProfile = iam.CfnInstanceProfile(self, "BatchEC2RoleInstanceProfile", roles=[ec2Role.role_name])

        batchSG = ec2.SecurityGroup(
            self,
            "BatchSG",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Batch SG",
        )

        batchSG.add_egress_rule(ec2.Peer.ipv4(self.vpc.vpc_cidr_block), ec2.Port.all_tcp())

        # Creates Compute Env conditionally: one entry per item in the
        # batch-compute manifest, dispatched on its "compute_type" —
        # "ON_DEMAND*" -> EC2 on-demand, "SPOT" -> EC2 spot, anything else ->
        # Fargate. Each env is paired with its queue-scheduling order.
        batch_compute_config = batch_compute.get("batch_compute_config")
        on_demand_compute_env_list = []
        spot_compute_env_list = []
        fargate_compute_env_list = []

        if batch_compute_config:
            for batchenv in batch_compute_config:
                if batchenv.get("compute_type").upper().startswith("ON"):
                    instance_types_context = batchenv.get("instance_types")
                    instance_types = []
                    if instance_types_context:
                        for value in instance_types_context:
                            instance_type = ec2.InstanceType(value)
                            instance_types.append(instance_type)
                    on_demand_compute_env = batch.ComputeEnvironment(
                        self,
                        f"{dep_mod}-OnDemandComputeEnv-{batchenv.get('env_name')}",
                        compute_resources=batch.ComputeResources(
                            instance_role=ec2IAMProfile.attr_arn,
                            vpc=self.vpc,
                            instance_types=instance_types if instance_types else None,
                            maxv_cpus=batchenv.get("max_vcpus") if batchenv.get("max_vcpus") else "256",  # type: ignore
                            minv_cpus=0,
                            type=batch.ComputeResourceType.ON_DEMAND,
                            vpc_subnets=ec2.SubnetSelection(subnets=self.private_subnets),
                            security_groups=[batchSG],
                        ),
                    )
                    on_demand_compute_env_list.append(
                        batch.JobQueueComputeEnvironment(
                            compute_environment=on_demand_compute_env,
                            order=int(batchenv.get("order")),
                        )
                    )
                elif batchenv.get("compute_type").upper() == "SPOT":
                    instance_types_context = batchenv.get("instance_types")
                    instance_types = []
                    if instance_types_context:
                        for value in instance_types_context:
                            instance_type = ec2.InstanceType(value)
                            instance_types.append(instance_type)
                    spot_compute_env = batch.ComputeEnvironment(
                        self,
                        f"{dep_mod}-SpotComputeEnv-{batchenv.get('env_name')}",
                        compute_resources=batch.ComputeResources(
                            instance_role=ec2IAMProfile.attr_arn,
                            vpc=self.vpc,
                            instance_types=instance_types if instance_types else None,
                            maxv_cpus=batchenv.get("max_vcpus") if batchenv.get("max_vcpus") else "256",  # type: ignore
                            minv_cpus=0,
                            type=batch.ComputeResourceType.SPOT,
                            vpc_subnets=ec2.SubnetSelection(subnets=self.private_subnets),
                            security_groups=[batchSG],
                            allocation_strategy=batch.AllocationStrategy("SPOT_CAPACITY_OPTIMIZED"),
                        ),
                    )
                    spot_compute_env_list.append(
                        batch.JobQueueComputeEnvironment(
                            compute_environment=spot_compute_env,
                            order=int(batchenv.get("order")),
                        )
                    )
                else:
                    fargate_compute_env = batch.ComputeEnvironment(
                        self,
                        f"{dep_mod}-FargateJobEnv-{batchenv.get('env_name')}",
                        compute_resources=batch.ComputeResources(
                            type=batch.ComputeResourceType.FARGATE,
                            vpc_subnets=ec2.SubnetSelection(subnets=self.private_subnets),
                            vpc=self.vpc,
                        ),
                    )
                    fargate_compute_env_list.append(
                        batch.JobQueueComputeEnvironment(
                            compute_environment=fargate_compute_env,
                            order=int(batchenv.get("order")),
                        )
                    )

        # One job queue per compute type that was actually configured; app.py
        # probes these attributes with hasattr() when emitting stack outputs.
        if on_demand_compute_env_list:
            self.on_demand_jobqueue = batch.JobQueue(
                self,
                f"{dep_mod}-OndemandJobQueue",
                compute_environments=on_demand_compute_env_list,
                job_queue_name=f"{dep_mod}-OnDemandJobQueue",
                priority=1,
            )

        if spot_compute_env_list:
            self.spot_jobqueue = batch.JobQueue(
                self,
                f"{dep_mod}-SpotJobQueue",
                compute_environments=spot_compute_env_list,
                job_queue_name=f"{dep_mod}-SpotJobQueue",
                priority=1,
            )

        if fargate_compute_env_list:
            self.fargate_jobqueue = batch.JobQueue(
                self,
                f"{dep_mod}-FargateJobQueue",
                compute_environments=fargate_compute_env_list,
                job_queue_name=f"{dep_mod}-FargateJobQueue",
                priority=1,
            )

        # cdk-nag checks with module-wide suppressions for the broad IAM
        # patterns above.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())

        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                {  # type: ignore
                    "id": "AwsSolutions-IAM4",
                    "reason": "Managed Policies are for service account roles only",
                },
                {  # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to ADDF resources",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/app.py | modules/simulations/batch-managed/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
from aws_cdk import App, CfnOutput, Environment
from stack import BatchDags
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
vpc_id = os.getenv(_param("VPC_ID")) # required
private_subnet_ids = json.loads(os.getenv(_param("PRIVATE_SUBNET_IDS"), "")) # required
batch_compute = json.loads(os.getenv(_param("BATCH_COMPUTE"), "")) # required
mwaa_exec_role = os.getenv(_param("MWAA_EXEC_ROLE"))
if not vpc_id:
raise Exception("missing input parameter vpc-id")
if not private_subnet_ids:
raise Exception("missing input parameter private-subnet-ids")
if not batch_compute:
raise ValueError("Batch Compute Configuration is missing.")
if not mwaa_exec_role:
raise ValueError("MWAA Execution Role is missing.")
app = App()
stack = BatchDags(
scope=app,
id=f"{project_name}-{deployment_name}-{module_name}",
project_name=project_name,
deployment_name=deployment_name,
module_name=module_name,
mwaa_exec_role=mwaa_exec_role,
vpc_id=vpc_id,
private_subnet_ids=private_subnet_ids,
batch_compute=batch_compute,
env=Environment(
account=os.environ["CDK_DEFAULT_ACCOUNT"],
region=os.environ["CDK_DEFAULT_REGION"],
),
)
CfnOutput(
scope=stack,
id="metadata",
value=stack.to_json_string(
{
"DagRoleArn": stack.dag_role.role_arn,
"OnDemandJobQueueArn": stack.on_demand_jobqueue.job_queue_arn
if hasattr(stack, "on_demand_jobqueue")
else "QUEUE NOT CREATED",
"SpotJobQueueArn": stack.spot_jobqueue.job_queue_arn
if hasattr(stack, "spot_jobqueue")
else "QUEUE NOT CREATED",
"FargateJobQueueArn": stack.fargate_jobqueue.job_queue_arn
if hasattr(stack, "fargate_jobqueue")
else "QUEUE NOT CREATED",
}
),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/simulation_batch_dags/batch_simple_mock_fargate.py | modules/simulations/batch-managed/simulation_batch_dags/batch_simple_mock_fargate.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import random
import string
import textwrap
from datetime import timedelta
from typing import TypeVar
import boto3
from airflow import DAG, settings
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.operators.batch import AwsBatchOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
from mypy_boto3_batch.client import BatchClient
from simulation_batch_dags import batch_dag_config
ValueType = TypeVar("ValueType")
DAG_ID = os.path.basename(__file__).replace(".py", "")
TASK_DEF_XCOM_KEY = "job_definition_name"
DEFAULT_ARGS = {
"owner": "airflow",
"depends_on_past": False,
"email": ["airflow@example.com"],
}
logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
def try_create_aws_conn(**kwargs):
    """Ensure the "aws_batch" Airflow connection exists.

    If the connection is missing, create one whose extra config carries the
    DAG execution role ARN so the Batch operator can assume it.

    Bug fixed: the original created the ORM session inside the ``try`` whose
    ``finally`` closed it — if ``settings.Session()`` itself raised, the
    ``finally`` block hit an unbound local. The session is now created before
    entering the ``try``.
    """
    conn_id = "aws_batch"
    try:
        AwsHook.get_connection(conn_id)
    except AirflowException:
        extra = json.dumps({"role_arn": batch_dag_config.DAG_ROLE}, indent=2)
        conn = Connection(conn_id=conn_id, conn_type="aws", host="", schema="", login="", extra=extra)
        session = settings.Session()
        try:
            session.add(conn)
            session.commit()
        finally:
            session.close()
def get_job_queue_name() -> str:
    """Return the name of the Fargate Batch job queue.

    Only the Fargate queue created from the batch-compute manifest is consumed
    by this mock DAG; the on-demand and spot queue ARNs exist in the config
    but are intentionally unused here.
    """
    fargate_queue_arn = batch_dag_config.FARGATE_JOB_QUEUE_ARN  # consume if created
    # A job-queue ARN ends in ".../job-queue/<name>"; keep the final segment.
    _, _, queue_name = fargate_queue_arn.rpartition("/")
    return queue_name
def get_job_name() -> str:
    """Build a Batch job name with a random 6-letter lowercase suffix."""
    suffix = "".join(random.choices(string.ascii_lowercase, k=6))
    prefix = f"addf-{batch_dag_config.DEPLOYMENT_NAME}-{batch_dag_config.MODULE_NAME}"
    return f"{prefix}-simplemock-fargate-job-{suffix}"
def get_job_def_name() -> str:
    """Return the deterministic Batch job-definition name for this module.

    Unlike the job name, the definition name carries no random suffix so that
    registration and deregistration tasks can reference the same definition.
    """
    return f"addf-{batch_dag_config.DEPLOYMENT_NAME}-{batch_dag_config.MODULE_NAME}-simplemock-fargate-jobdef"
def get_batch_client() -> BatchClient:
    """Return a Batch client backed by temporary credentials for the DAG role."""
    credentials = boto3.client("sts").assume_role(
        RoleArn=batch_dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )["Credentials"]
    assumed_session = Session(
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )
    return assumed_session.client("batch")
def register_job_definition_fargate(ti, job_def_name: str) -> None:
    """Register a minimal Fargate Batch job definition and publish its name.

    The registered definition's name is pushed to XCom under
    TASK_DEF_XCOM_KEY so downstream tasks can reference it.

    Parameters
    ----------
    ti
        The Airflow TaskInstance (supplied via provide_context).
    job_def_name : str
        Name to register the job definition under.
    """
    # NOTE: the original signature declared `-> str` but nothing is returned;
    # the result is communicated exclusively through XCom.
    client = get_batch_client()
    resp = client.register_job_definition(
        jobDefinitionName=job_def_name,
        type="container",
        containerProperties={
            "image": "ubuntu",
            "jobRoleArn": batch_dag_config.DAG_ROLE,
            "executionRoleArn": batch_dag_config.DAG_ROLE,
            "environment": [
                {
                    "name": "AWS_DEFAULT_REGION",
                    "value": batch_dag_config.REGION,
                }
            ],
            # Minimal Fargate sizing: 0.25 vCPU / 1024 MiB.
            "resourceRequirements": [
                {"value": ".25", "type": "VCPU"},
                {"value": "1024", "type": "MEMORY"},
            ],
        },
        propagateTags=True,
        timeout={"attemptDurationSeconds": 60},
        platformCapabilities=["FARGATE"],
    )
    ti.xcom_push(key=TASK_DEF_XCOM_KEY, value=resp["jobDefinitionName"])
def deregister_job_definition(ti, job_def_name: str) -> None:
    """Deregister the given Batch job definition and record the response metadata in XCom."""
    batch_client = get_batch_client()
    result = batch_client.deregister_job_definition(jobDefinition=job_def_name)
    ti.xcom_push(key="TASK_DEF_DEREGISTER_XCOM_KEY", value=result["ResponseMetadata"])
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    job_definition_name = get_job_def_name()
    job_name = get_job_name()
    queue_name = get_job_queue_name()
    # Ensure the "aws_batch" connection exists before any Batch interaction.
    create_aws_conn = PythonOperator(
        task_id="try_create_aws_conn",
        python_callable=try_create_aws_conn,
        dag=dag,
    )
    register_batch_job_defintion = PythonOperator(
        task_id="register_batch_job_definition",
        dag=dag,
        provide_context=True,
        python_callable=register_job_definition_fargate,
        op_kwargs={"job_def_name": job_definition_name},
    )
    submit_batch_job = AwsBatchOperator(
        task_id="submit_batch_job",
        job_name=job_name,
        job_queue=queue_name,
        aws_conn_id="aws_batch",
        # BUG FIX: previously a hard-coded, deployment-specific literal
        # ("addf-local-simulations-batch-managed-...") was used here, which only
        # worked for one particular deployment. Use the deterministic name
        # computed above — the same one the register task registers.
        job_definition=job_definition_name,
        overrides={
            "command": [
                "bash",
                "-c",
                # Mock workload: sleeps a random number of seconds (bounded by
                # MAX_SECONDS) and fails randomly, roughly once per FAILURE_SEED runs.
                textwrap.dedent(
                    """\
                    #/usr/bin/env bash
                    echo "[$(date)] Starting $JOB_NAME"
                    TIC_COUNT=$RANDOM
                    let "TIC_COUNT %= $MAX_SECONDS"
                    FAILURE_CHECK=$RANDOM
                    let "FAILURE_CHECK %= $FAILURE_SEED"
                    echo "[$(date)] Random Runtime: $TIC_COUNT"
                    echo "[$(date)] Random Failure: $FAILURE_CHECK"
                    CURRENT_COUNT=0
                    while true; do
                        CURRENT_COUNT=$((CURRENT_COUNT + 1))
                        if [ "$CURRENT_COUNT" -ge "$TIC_COUNT" ]; then
                            break
                        fi
                        sleep 1
                    done
                    if [ "$FAILURE_CHECK" -eq "0" ]; then
                        echo "[$(date)] Failure" && exit 1
                    else
                        echo "[$(date)] Complete" && exit 0
                    fi
                    """
                ),
            ],
            "environment": [
                {"name": "JOB_NAME", "value": "Simple Batch Mock"},
                {"name": "MAX_SECONDS", "value": "120"},
                {"name": "FAILURE_SEED", "value": "20"},
                {"name": "DEBUG", "value": "true"},
                {"name": "AWS_ACCOUNT_ID", "value": batch_dag_config.ACCOUNT_ID},
            ],
        },
    )
    deregister_batch_job_defintion = PythonOperator(
        task_id="deregister_batch_job_defintion",
        dag=dag,
        provide_context=True,
        # BUG FIX: also replaced the hard-coded literal here with the shared name.
        op_kwargs={"job_def_name": job_definition_name},
        python_callable=deregister_job_definition,
    )
    (create_aws_conn >> register_batch_job_defintion >> submit_batch_job >> deregister_batch_job_defintion)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/simulation_batch_dags/batch_simple_mock_ec2.py | modules/simulations/batch-managed/simulation_batch_dags/batch_simple_mock_ec2.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import random
import string
import textwrap
from datetime import timedelta
from typing import TypeVar
import boto3
from airflow import DAG, settings
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.operators.batch import AwsBatchOperator
from airflow.utils.dates import days_ago
from boto3.session import Session
from mypy_boto3_batch.client import BatchClient
from simulation_batch_dags import batch_dag_config
ValueType = TypeVar("ValueType")  # generic value type used by the SQS-based helpers
# DAG id derives from this file's name (without the .py extension).
DAG_ID = os.path.basename(__file__).replace(".py", "")
# XCom key under which the registered Batch job definition ARN is published.
TASK_DEF_XCOM_KEY = "job_definition_arn"
DEFAULT_ARGS = {
    "owner": "airflow",
    "depends_on_past": False,
    "email": ["airflow@example.com"],
}
logger = logging.getLogger("airflow")
logger.setLevel("DEBUG")
def try_create_aws_conn(**kwargs):
    """Create the "aws_batch" Airflow connection if it does not already exist.

    Looks the connection up via AwsHook; on AirflowException (connection
    missing) a new "aws"-type connection is persisted that assumes the DAG
    role from batch_dag_config.
    """
    conn_id = "aws_batch"
    try:
        AwsHook.get_connection(conn_id)
    except AirflowException:
        extra = json.dumps({"role_arn": batch_dag_config.DAG_ROLE}, indent=2)
        conn = Connection(conn_id=conn_id, conn_type="aws", host="", schema="", login="", extra=extra)
        # Create the session before entering the try block: previously it was
        # created inside the try, so a failure in Session() would have caused a
        # NameError in the finally clause, masking the original error.
        session = settings.Session()
        try:
            session.add(conn)
            session.commit()
        finally:
            session.close()
def get_job_queue_name() -> str:
    """Return the name of the on-demand JobQueue created by the batch-compute module.

    The queue name is the final segment of the queue ARN.
    """
    # Spot / Fargate queues also exist in batch_dag_config and can be consumed
    # here instead if they were created.
    on_demand_queue_arn = batch_dag_config.ON_DEMAND_JOB_QUEUE_ARN
    _, _, queue_name = on_demand_queue_arn.rpartition("/")
    return queue_name
def get_job_name() -> str:
    """Return a unique Batch job name with a random six-letter suffix."""
    suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(6))
    return (
        f"{batch_dag_config.PROJECT_NAME}-{batch_dag_config.DEPLOYMENT_NAME}"
        f"-{batch_dag_config.MODULE_NAME}-simplemock-job-{suffix}"
    )
def get_job_def_name() -> str:
    """Return the deterministic Batch job-definition name for this module."""
    parts = (
        batch_dag_config.PROJECT_NAME,
        batch_dag_config.DEPLOYMENT_NAME,
        batch_dag_config.MODULE_NAME,
        "simplemock-jobdef",
    )
    return "-".join(parts)
def get_batch_client() -> BatchClient:
    """Return a Batch client backed by temporary credentials for the DAG role."""
    credentials = boto3.client("sts").assume_role(
        RoleArn=batch_dag_config.DAG_ROLE,
        RoleSessionName="AssumeRoleSession1",
    )["Credentials"]
    assumed_session = Session(
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )
    return assumed_session.client("batch")
def register_job_definition_on_demand(ti, job_def_name: str) -> None:
    """Register a minimal EC2 Batch job definition and publish its ARN.

    The registered definition's ARN is pushed to XCom under
    TASK_DEF_XCOM_KEY so downstream tasks can reference it.

    Parameters
    ----------
    ti
        The Airflow TaskInstance (supplied via provide_context).
    job_def_name : str
        Name to register the job definition under.
    """
    # NOTE: the original signature declared `-> str` but nothing is returned;
    # the result is communicated exclusively through XCom.
    client = get_batch_client()
    resp = client.register_job_definition(
        jobDefinitionName=job_def_name,
        type="container",
        containerProperties={
            "image": "ubuntu",
            "jobRoleArn": batch_dag_config.DAG_ROLE,
            "environment": [
                {
                    "name": "AWS_DEFAULT_REGION",
                    "value": batch_dag_config.REGION,
                }
            ],
            # Minimal EC2 sizing: 1 vCPU / 512 MiB.
            "resourceRequirements": [
                {"value": "1", "type": "VCPU"},
                {"value": "512", "type": "MEMORY"},
            ],
        },
        propagateTags=True,
        timeout={"attemptDurationSeconds": 60},
        platformCapabilities=["EC2"],
    )
    ti.xcom_push(key=TASK_DEF_XCOM_KEY, value=resp["jobDefinitionArn"])
def deregister_job_definition(ti, job_def_arn: str) -> None:
    """Deregister the given Batch job definition and record the response metadata in XCom."""
    batch_client = get_batch_client()
    result = batch_client.deregister_job_definition(jobDefinition=job_def_arn)
    ti.xcom_push(key="TASK_DEF_DEREGISTER_XCOM_KEY", value=result["ResponseMetadata"])
with DAG(
    dag_id=DAG_ID,
    default_args=DEFAULT_ARGS,
    dagrun_timeout=timedelta(hours=2),
    start_date=days_ago(1),  # type: ignore
    schedule_interval="@once",
) as dag:
    job_definition_name = get_job_def_name()
    job_name = get_job_name()
    queue_name = get_job_queue_name()
    # Ensure the "aws_batch" connection exists before any Batch interaction.
    create_aws_conn = PythonOperator(
        task_id="try_create_aws_conn",
        python_callable=try_create_aws_conn,
        dag=dag,
    )
    register_batch_job_defintion = PythonOperator(
        task_id="register-batch-job-definition",
        dag=dag,
        provide_context=True,
        python_callable=register_job_definition_on_demand,
        op_kwargs={"job_def_name": job_definition_name},
    )
    submit_batch_job = AwsBatchOperator(
        task_id="submit_batch_job",
        job_name=job_name,
        job_queue=queue_name,
        aws_conn_id="aws_batch",
        # BUG FIX: previously a hard-coded "-local-" literal was used here,
        # which only worked for one particular deployment. Use the
        # deterministic name computed above — the same one the register task
        # registers.
        job_definition=job_definition_name,
        overrides={
            "command": [
                "bash",
                "-c",
                # Mock workload: sleeps a random number of seconds (bounded by
                # MAX_SECONDS) and fails randomly, roughly once per FAILURE_SEED runs.
                textwrap.dedent(
                    """\
                    #/usr/bin/env bash
                    echo "[$(date)] Starting $JOB_NAME"
                    TIC_COUNT=$RANDOM
                    let "TIC_COUNT %= $MAX_SECONDS"
                    FAILURE_CHECK=$RANDOM
                    let "FAILURE_CHECK %= $FAILURE_SEED"
                    echo "[$(date)] Random Runtime: $TIC_COUNT"
                    echo "[$(date)] Random Failure: $FAILURE_CHECK"
                    CURRENT_COUNT=0
                    while true; do
                        CURRENT_COUNT=$((CURRENT_COUNT + 1))
                        if [ "$CURRENT_COUNT" -ge "$TIC_COUNT" ]; then
                            break
                        fi
                        sleep 1
                    done
                    if [ "$FAILURE_CHECK" -eq "0" ]; then
                        echo "[$(date)] Failure" && exit 1
                    else
                        echo "[$(date)] Complete" && exit 0
                    fi
                    """
                ),
            ],
            "environment": [
                {"name": "JOB_NAME", "value": "Simple Batch Mock"},
                {"name": "MAX_SECONDS", "value": "120"},
                {"name": "FAILURE_SEED", "value": "20"},
                {"name": "DEBUG", "value": "true"},
                {"name": "AWS_ACCOUNT_ID", "value": batch_dag_config.ACCOUNT_ID},
            ],
        },
    )
    deregister_batch_job_defintion = PythonOperator(
        task_id="deregister_batch_job_defintion",
        dag=dag,
        provide_context=True,
        # BUG FIX: also replaced the hard-coded literal here with the shared name.
        op_kwargs={"job_def_arn": job_definition_name},
        python_callable=deregister_job_definition,
    )
    (create_aws_conn >> register_batch_job_defintion >> submit_batch_job >> deregister_batch_job_defintion)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/simulation_batch_dags/batch_dag_config.py | modules/simulations/batch-managed/simulation_batch_dags/batch_dag_config.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is populated with configurations information when the Module is deployed
# Configuration parameters are exported as module level constants
#
# Example:
# SOME_PARAMETER = 'some value'
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/simulation_batch_dags/__init__.py | modules/simulations/batch-managed/simulation_batch_dags/__init__.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/tests/test_stack.py | modules/simulations/batch-managed/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/tests/__init__.py | modules/simulations/batch-managed/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/images/simulation-mock/src/setup.py | modules/simulations/batch-managed/images/simulation-mock/src/setup.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
# Packaging metadata for the simulation-mock container helper package.
setup(
    name="simulation-mock",
    version="0.1.0",
    author="AWS Professional Services",
    author_email="aws-proserve-opensource@amazon.com",
    project_urls={"Org Site": "https://aws.amazon.com/professional-services/"},
    packages=find_packages(include=["simulation_mock", "simulation_mock.*"]),
    # Supported interpreter range and runtime dependencies of the image.
    python_requires=">=3.7, <3.14",
    install_requires=["boto3~=1.21.19", "platonic-sqs==1.0.1"],
    include_package_data=True,
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/__init__.py | modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/__init__.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
# Verbose log format used when DEBUG mode is enabled.
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d][%(levelname)s][%(threadName)s] %(message)s"


def get_logger() -> logging.Logger:
    """Return a module logger configured from the DEBUG environment variable.

    DEBUG values of "true", "yes" or "1" (case-insensitive) enable DEBUG level
    and quieten the noisy AWS/HTTP client loggers; anything else means INFO.
    """
    debug_enabled = os.environ.get("DEBUG", "False").lower() in ("true", "yes", "1")
    level = logging.DEBUG if debug_enabled else logging.INFO
    logging.basicConfig(level=level, format=DEBUG_LOGGING_FORMAT)
    logger: logging.Logger = logging.getLogger(__name__)
    logger.setLevel(level)
    if debug_enabled:
        for noisy in ("boto3", "botocore", "urllib3"):
            logging.getLogger(noisy).setLevel(logging.ERROR)
    return logger
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/sqs_manager.py | modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/sqs_manager.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import time
from contextlib import contextmanager
from datetime import timedelta
from typing import Iterator
from platonic.queue.errors import MessageReceiveTimeout
from platonic.sqs.queue import SQSReceiver
from platonic.sqs.queue.message import SQSMessage
from platonic.sqs.queue.types import ValueType
from platonic.timeout import ConstantTimeout
from simulation_mock import get_logger
# How long to wait for an SQS message before treating the queue as empty.
MESSAGE_RECEIVE_TIMEOUT_SECONDS = 90
# Maximum time to wait for the simulator to consume a data file.
PROCESSING_TIMEOUT_MINUTES = 5
# Polling interval while waiting for the data file to disappear.
FILE_CHECK_SLEEP_TIME_SECONDS = 30
# Name of the data file exchanged with the simulator container.
DATA_FILE_NAME = "message.json"
LOGGER = get_logger()
class ProcessingTimeoutException(Exception):
    """Raised when the simulator does not consume a data file within the processing timeout."""
class SQSHeartbeatReceiver(SQSReceiver[ValueType]):
    """SQSReceiver that can extend message visibility (heartbeat) and only
    acknowledges a message when its processing block exits without error."""
    def heartbeat(self, message: SQSMessage[ValueType], seconds: int) -> None:
        """Extend the visibility timeout of a Message being processed.
        Parameters
        ----------
        message : SQSMessage[ValueType]
            The SQS Message to extend
        seconds : int
            Number of seconds to extend the timeout
        """
        self.client.change_message_visibility(
            QueueUrl=self.url,
            ReceiptHandle=message.receipt_handle,
            VisibilityTimeout=seconds,
        )
    @contextmanager
    def acknowledgement(self, message: SQSMessage[ValueType]) -> Iterator[SQSMessage[ValueType]]:
        """Override of ``acknowledgement`` that won't acknowledge (delete) the message if an Exception is thrown.
        Parameters
        ----------
        message : SQSMessage[ValueType]
            The SQS Message to conditionally acknowledge
        Yields
        ------
        SQSMessage[ValueType]
            The SQS Message
        Raises
        ------
        e
            Any exception that was thrown
        """
        try: # noqa: WPS501
            yield message
        except Exception as e:
            # NAK: log and re-raise so the message stays visible on the queue.
            LOGGER.debug("NAK: %s - %s", message, e)
            raise e
        else:
            # ACK only on clean exit of the processing block.
            LOGGER.debug("ACK: %s", message)
            self.acknowledge(message)
def main(url: str, dir: str, single_message: bool) -> int:
    """Receive simulation messages from SQS and hand them to the simulator via a data file.

    Each message body is written to DATA_FILE_NAME inside *dir*; the simulator
    signals completion by deleting that file. While the file still exists the
    SQS message's visibility timeout is repeatedly extended (heartbeat).

    Returns 0 on an empty queue (or a successful single message), 1 when a
    message times out in single-message mode.
    """
    LOGGER.info("QUEUE: %s", url)
    LOGGER.info("DIR: %s", dir)
    LOGGER.info("SINGLE_MESSAGE: %s", single_message)
    incoming_simulations = SQSHeartbeatReceiver[str](
        url=url,
        timeout=ConstantTimeout(period=timedelta(seconds=MESSAGE_RECEIVE_TIMEOUT_SECONDS)),
    )
    while True:
        try:
            with incoming_simulations.acknowledgement(incoming_simulations.receive()) as message:
                # CONSISTENCY FIX: use the shared DATA_FILE_NAME constant (it was
                # defined but the literal "message.json" was hard-coded here).
                data_file = os.path.join(dir, DATA_FILE_NAME)
                LOGGER.info("MSG: %s", message)
                LOGGER.info("DATA_FILE: %s", data_file)
                with open(data_file, "w") as file:
                    file.write(message.value)
                timeout = ConstantTimeout(timedelta(minutes=PROCESSING_TIMEOUT_MINUTES))
                with timeout.timer() as timer:
                    while not timer.is_expired:
                        if os.path.isfile(data_file):
                            LOGGER.debug("EXISTS: %s", data_file)
                            # Keep the message invisible to other consumers while
                            # the simulator is still working on the file.
                            incoming_simulations.heartbeat(
                                message=message,
                                seconds=(MESSAGE_RECEIVE_TIMEOUT_SECONDS - 30),
                            )
                            time.sleep(FILE_CHECK_SLEEP_TIME_SECONDS)
                        else:
                            LOGGER.debug("GONE: %s", data_file)
                            break
                    else:
                        raise ProcessingTimeoutException("TIMEOUT")
        except MessageReceiveTimeout:
            LOGGER.info("EMPTY: %s", url)
            return 0
        except ProcessingTimeoutException:
            LOGGER.error("TIMEOUT: %s", PROCESSING_TIMEOUT_MINUTES)
            if single_message:
                return 1
        else:
            LOGGER.info("SUCCESS: %s", message)
            if single_message:
                return 0
if __name__ == "__main__":
    # CLI entry point: requires --url and --dir; --single-message stops after one message.
    parser = argparse.ArgumentParser(description="Manage SQS Messages")
    parser.add_argument("--url", required=True, help="URL of the SQS Queue to manage")
    parser.add_argument("--dir", required=True, help="Directory to use/monitor for data files")
    parser.add_argument(
        "--single-message",
        action="store_true",
        help="Whether to retrieve/process a single message or process messages until the queue is empty",
    )
    args = parser.parse_args()
    LOGGER.debug("ARGS: %s", args)
    # Exit code propagates main()'s 0/1 result to the container runtime.
    sys.exit(main(url=args.url, dir=args.dir, single_message=args.single_message))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/simulator.py | modules/simulations/batch-managed/images/simulation-mock/src/simulation_mock/simulator.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import time
from datetime import timedelta
from random import randint
from platonic.timeout import ConstantTimeout
from simulation_mock import get_logger
# How long to wait for a new data file to appear before exiting cleanly.
FILE_CHECK_TIMEOUT_SECONDS = 120
# Name of the data file exchanged with the SQS manager.
DATA_FILE_NAME = "message.json"
# Polling interval while waiting for the data file to appear.
FILE_CHECK_SLEEP_TIME_SECONDS = 10
LOGGER = get_logger()
class RandomFailureException(Exception):
    """Raised to simulate a random failure of the mock simulation."""
class FileCheckTimoutException(Exception):
    """Raised when no new data file appears within the file-check timeout."""
def main(dir: str, max_seconds: int, failure_seed: int) -> int:
    """Mock a simulation run driven by data files dropped into *dir*.

    Waits for DATA_FILE_NAME to appear, sleeps a random amount of time (up to
    *max_seconds*), fails randomly roughly once per *failure_seed* runs, and
    otherwise removes the file to signal completion — then waits for the next file.

    Returns 0 when no new file appears within the check timeout, 1 on a
    simulated random failure.
    """
    LOGGER.info("DIR: %s", dir)
    LOGGER.info("MAX_SECONDS: %s", max_seconds)
    LOGGER.info("FAILURE_SEED: %s", failure_seed)
    while True:
        try:
            timeout = ConstantTimeout(timedelta(seconds=FILE_CHECK_TIMEOUT_SECONDS))
            with timeout.timer() as timer:
                # CONSISTENCY FIX: use the shared DATA_FILE_NAME constant (it was
                # defined but the literal "message.json" was hard-coded here).
                data_file = os.path.join(dir, DATA_FILE_NAME)
                LOGGER.info("DATA_FILE: %s", data_file)
                while not timer.is_expired:
                    if os.path.isfile(data_file):
                        LOGGER.info("EXISTS: %s", data_file)
                        sleep_time = randint(0, max_seconds)
                        LOGGER.info("RANDOM_SLEEP: %s", sleep_time)
                        time.sleep(sleep_time)
                        if (randint(0, 32768) % failure_seed) == 0:
                            raise RandomFailureException("RANDOM")
                        else:
                            # Deleting the file tells the SQS manager we are done.
                            LOGGER.info("REMOVING: %s", data_file)
                            os.remove(data_file)
                            break
                    else:
                        LOGGER.debug("NOT_EXISTS: %s", data_file)
                        time.sleep(FILE_CHECK_SLEEP_TIME_SECONDS)
                else:
                    raise FileCheckTimoutException("TIMEOUT")
        except RandomFailureException:
            LOGGER.error("RANDOM_FAILURE")
            return 1
        except FileCheckTimoutException:
            LOGGER.info("EXISTS_TIMEOUT: %s", data_file)
            return 0
if __name__ == "__main__":
    # CLI entry point: directory to watch plus randomness controls for the mock run.
    parser = argparse.ArgumentParser(description="Mock simulation")
    parser.add_argument("--dir", required=True, help="Directory to use/monitor for data files")
    parser.add_argument(
        "--max-seconds",
        required=True,
        type=int,
        help="Max runtime to execute mock simulation",
    )
    parser.add_argument(
        "--failure-seed",
        required=True,
        type=int,
        help="Seed number to determine random failures",
    )
    args = parser.parse_args()
    LOGGER.debug("ARGS: %s", args)
    # Exit code propagates main()'s 0/1 result to the container runtime.
    sys.exit(main(dir=args.dir, max_seconds=args.max_seconds, failure_seed=args.failure_seed))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/stack.py | modules/service-catalog/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Any, cast
import aws_cdk.aws_iam as iam
import aws_cdk.aws_s3_assets as assets
import aws_cdk.aws_servicecatalog as servicecatalog
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from constructs import Construct, IConstruct
from service_catalog.products import products
class ServiceCatalogStack(Stack):
    """Stack that publishes ADDF application templates as Service Catalog products.

    One CloudFormation product is created per directory under ``seed_code``;
    each product packages the directory as an S3 asset used to seed a
    CodeCommit repository.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment_name: str,
        module_name: str,
        portfolio_access_role_arn: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            **kwargs,
        )
        self.deployment_name = deployment_name
        self.module_name = module_name
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment", value=f"addf-{self.deployment_name}-{self.module_name}"
        )
        self.portfolio = servicecatalog.Portfolio(
            self,
            "Portfolio",
            display_name="ADDF_Portfolio",
            provider_name="addf-admin",
            description="Portfolio for application templates provided by ADDF",
            message_language=servicecatalog.MessageLanguage.EN,
        )
        account_root_principal = iam.Role(
            self,
            "AccountRootPrincipal",
            assumed_by=iam.AccountRootPrincipal(),
        )
        self.portfolio.give_access_to_role(account_root_principal)
        # The caller may pass None (cast to str upstream) when no extra role
        # needs portfolio access.
        if portfolio_access_role_arn is not None:
            self.portfolio_access_role = iam.Role.from_role_arn(
                self, "portfolio-access-role", portfolio_access_role_arn
            )
            self.portfolio.give_access_to_role(self.portfolio_access_role)
        seed_code_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "seed_code")
        # next(os.walk(...))[1] yields the immediate sub-directory names of seed_code.
        for asset_dir_path in next(os.walk(seed_code_dir))[1]:
            asset_dir = os.path.basename(asset_dir_path)
            asset_name = "-" + asset_dir.replace("_", "-")
            app_asset = assets.Asset(
                self,
                f"AppAsset{asset_name}",
                path=os.path.join(seed_code_dir, asset_dir),
            )
            product_name = "ApplicationTemplate" + asset_name
            product = servicecatalog.CloudFormationProduct(
                self,
                f"AppTemplateProductRef{asset_name}",
                product_name=product_name,
                owner="addf-admin",
                product_versions=[
                    servicecatalog.CloudFormationProductVersion(
                        cloud_formation_template=servicecatalog.CloudFormationTemplate.from_product_stack(
                            products[asset_dir](
                                self,
                                # BUG FIX: this construct id previously interpolated
                                # the Asset object itself (f"...{app_asset}"), whose
                                # repr includes a memory address — yielding a
                                # nondeterministic construct id. Use asset_name,
                                # consistent with the sibling ids above.
                                f"AppTemplateProduct{asset_name}",
                                code_asset=app_asset,
                            ),
                        ),
                    ),
                ],
            )
            # Makes the product visible in SageMaker Studio's templates list.
            Tags.of(product).add(key="sagemaker:studio-visibility", value="true")
            self.portfolio.add_product(product)
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/app.py | modules/service-catalog/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
from aws_cdk import App, CfnOutput, Environment
from stack import ServiceCatalogStack
# Deployment/module identity is provided by the ADDF deployment framework.
deployment_name = os.getenv("ADDF_DEPLOYMENT_NAME", "")
module_name = os.getenv("ADDF_MODULE_NAME", "")
app = App()
def _param(name: str) -> str:
    """Return the environment-variable name for an ADDF module parameter."""
    return f"ADDF_PARAMETER_{name}"
DEFAULT_PORTFOLIO_ACCESS_ROLE_ARN = None
portfolio_access_role_arn = os.getenv(_param("PORTFOLIO_ACCESS_ROLE_ARN"), DEFAULT_PORTFOLIO_ACCESS_ROLE_ARN)
stack = ServiceCatalogStack(
    scope=app,
    id=f"addf-{deployment_name}-{module_name}",
    deployment_name=deployment_name,
    module_name=module_name,
    # May legitimately be None; the stack checks for that despite the str cast.
    portfolio_access_role_arn=cast(str, portfolio_access_role_arn),
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Export the portfolio (and optional access-role) details for other modules.
CfnOutput(
    stack,
    "PortfolioARN",
    value=stack.portfolio.portfolio_arn,
)
if portfolio_access_role_arn is not None:
    CfnOutput(
        stack,
        "PortfolioAccessRoleName",
        value=stack.portfolio_access_role.role_name,
    )
    CfnOutput(
        stack,
        "PortfolioAccessRoleARN",
        value=stack.portfolio_access_role.role_arn,
    )
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/service_catalog/products.py | modules/service-catalog/service_catalog/products.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import aws_cdk.aws_codecommit as codecommit
import aws_cdk.aws_s3_assets as assets
import aws_cdk.aws_servicecatalog as servicecatalog
from aws_cdk import CfnOutput, CfnParameter
from constructs import Construct
class MLopsAppTemplateProduct(servicecatalog.ProductStack):
    """Service Catalog product that seeds a CodeCommit repo from a packaged asset.

    SageMaker Projects provisions this product with SageMakerProjectName /
    SageMakerProjectId parameters; the repository name is derived from both.
    """
    def __init__(self, scope: Construct, id: str, code_asset: assets.Asset) -> None:
        super().__init__(scope, id)
        sagemaker_project_name = CfnParameter(
            self,
            "SageMakerProjectName",
            type="String",
            description="Name of the project.",
        )
        sagemaker_project_id = CfnParameter(
            self,
            "SageMakerProjectId",
            type="String",
            description="Service generated Id of the project.",
        )
        # Unique per-project prefix used for the repository name.
        prefix = f"{sagemaker_project_name.value_as_string}-{sagemaker_project_id.value_as_string}"
        CfnOutput(
            self,
            "AssetPath",
            value=code_asset.asset_path,
        )
        CfnOutput(
            self,
            "AssetBucket",
            value=code_asset.bucket.bucket_arn,
        )
        # Seed the repository's initial contents from the S3 asset.
        codecommit.Repository(
            self,
            id="Repo",
            repository_name=f"{prefix}-repository",
            code=codecommit.Code.from_asset(code_asset),
        )
# Maps a seed_code directory name to the ProductStack class that packages it;
# the key must match the asset_dir, i.e. "mlops_app" corresponds to
# modules/service-catalog/seed_code/mlops_app.
products = {"mlops_app": MLopsAppTemplateProduct}
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/service_catalog/__init__.py | modules/service-catalog/service_catalog/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/ml/stable_pipeline.py | modules/service-catalog/seed_code/mlops_app/ml/stable_pipeline.py | import json
import os
import boto3
import sagemaker
import sagemaker.session
from sagemaker.estimator import Estimator
from sagemaker.inputs import TrainingInput
from sagemaker.model import Model
from sagemaker.model_metrics import MetricsSource, ModelMetrics
from sagemaker.processing import ProcessingInput, ProcessingOutput, ScriptProcessor
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo
from sagemaker.workflow.functions import JsonGet
from sagemaker.workflow.model_step import ModelStep
from sagemaker.workflow.parameters import ParameterInteger, ParameterString
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.pipeline_context import PipelineSession
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.steps import ProcessingStep, TrainingStep
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def get_role(sagemaker_session):
return os.getenv(
"SAGEMAKER_PIPELINE_ROLE_ARN",
sagemaker.session.get_execution_role(sagemaker_session),
)
def get_sagemaker_client(region):
"""Gets the sagemaker client.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
`sagemaker.session.Session instance
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
return sagemaker_client
def get_session(region, default_bucket):
"""Gets the sagemaker session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
`sagemaker.session.Session instance
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
runtime_client = boto_session.client("sagemaker-runtime")
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_runtime_client=runtime_client,
default_bucket=default_bucket,
)
def get_pipeline_session(region, default_bucket):
"""Gets the pipeline session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
PipelineSession instance
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
return PipelineSession(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
default_bucket=default_bucket,
)
def get_pipeline_custom_tags(new_tags, region, sagemaker_project_arn=None):
try:
sm_client = get_sagemaker_client(region)
response = sm_client.list_tags(
ResourceArn=sagemaker_project_arn,
)
project_tags = response["Tags"]
for project_tag in project_tags:
new_tags.append(project_tag)
except Exception as e:
print(f"Error getting project tags: {e}")
return new_tags
def get_pipeline(
region,
sagemaker_project_arn=None,
role=None,
default_bucket=None,
use_case_id="abalone",
model_package_group_name="abalone-stable-models",
base_job_prefix="abalone",
):
"""Gets a SageMaker ML Pipeline instance working with with Blazeface.
Args:
region: AWS region to create and run the pipeline.
role: IAM role to create and run steps and pipeline.
default_bucket: the bucket to use for storing the artifacts
Returns:
an instance of a pipeline
"""
config_file = open("../.sagemaker-code-config")
sagemaker_code_config = json.load(config_file)
sagemaker_pipeline_name = sagemaker_code_config["sagemakerPipelineName"]
pipeline_name = f"{sagemaker_pipeline_name}-stable"
model_package_group_name = f"{pipeline_name}-models"
sagemaker_session = get_session(region, default_bucket)
if role is None:
role = get_role(sagemaker_session)
pipeline_session = get_pipeline_session(region, default_bucket)
# parameters for pipeline execution
processing_instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)
model_approval_status = ParameterString(
name="ModelApprovalStatus",
default_value="PendingManualApproval",
)
input_data = ParameterString(
name="InputDataUrl",
default_value=f"s3://sagemaker-servicecatalog-seedcode-{region}/dataset/abalone-dataset.csv",
)
processing_instance_type = "ml.m5.xlarge"
training_instance_type = "ml.m5.xlarge"
# processing step for feature engineering
sklearn_processor = SKLearnProcessor(
framework_version="0.23-1",
instance_type=processing_instance_type,
instance_count=processing_instance_count,
base_job_name=f"{base_job_prefix}/sklearn-abalone-preprocess",
sagemaker_session=pipeline_session,
role=role,
)
step_args = sklearn_processor.run(
outputs=[
ProcessingOutput(output_name="train", source="/opt/ml/processing/train"),
ProcessingOutput(output_name="validation", source="/opt/ml/processing/validation"),
ProcessingOutput(output_name="test", source="/opt/ml/processing/test"),
],
code=os.path.join(BASE_DIR, "scripts/preprocess.py"),
arguments=["--input-data", input_data],
)
step_process = ProcessingStep(
name="PreprocessAbaloneData",
step_args=step_args,
)
# training step for generating model artifacts
model_path = f"s3://{sagemaker_session.default_bucket()}/{base_job_prefix}/AbaloneTrain"
image_uri = sagemaker.image_uris.retrieve(
framework="xgboost",
region=region,
version="1.0-1",
py_version="py3",
instance_type=training_instance_type,
)
xgb_train = Estimator(
image_uri=image_uri,
instance_type=training_instance_type,
instance_count=1,
output_path=model_path,
base_job_name=f"{base_job_prefix}/abalone-train",
sagemaker_session=pipeline_session,
role=role,
)
xgb_train.set_hyperparameters(
objective="reg:linear",
num_round=50,
max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.7,
silent=0,
)
step_args = xgb_train.fit(
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs["validation"].S3Output.S3Uri,
content_type="text/csv",
),
},
)
step_train = TrainingStep(
name="TrainAbaloneModel",
step_args=step_args,
)
# processing step for evaluation
script_eval = ScriptProcessor(
image_uri=image_uri,
command=["python3"],
instance_type=processing_instance_type,
instance_count=1,
base_job_name=f"{base_job_prefix}/script-abalone-eval",
sagemaker_session=pipeline_session,
role=role,
)
step_args = script_eval.run(
inputs=[
ProcessingInput(
source=step_train.properties.ModelArtifacts.S3ModelArtifacts,
destination="/opt/ml/processing/model",
),
ProcessingInput(
source=step_process.properties.ProcessingOutputConfig.Outputs["test"].S3Output.S3Uri,
destination="/opt/ml/processing/test",
),
],
outputs=[
ProcessingOutput(output_name="evaluation", source="/opt/ml/processing/evaluation"),
],
code=os.path.join(BASE_DIR, "scripts/evaluate.py"),
)
evaluation_report = PropertyFile(
name="AbaloneEvaluationReport",
output_name="evaluation",
path="evaluation.json",
)
step_eval = ProcessingStep(
name="EvaluateAbaloneModel",
step_args=step_args,
property_files=[evaluation_report],
)
# register model step that will be conditionally executed
model_metrics = ModelMetrics(
model_statistics=MetricsSource(
s3_uri="{}/evaluation.json".format(
step_eval.arguments["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"],
),
content_type="application/json",
),
)
model = Model(
image_uri=image_uri,
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=pipeline_session,
role=role,
)
step_args = model.register(
content_types=["text/csv"],
response_types=["text/csv"],
inference_instances=["ml.t2.medium", "ml.m5.large"],
transform_instances=["ml.m5.large"],
model_package_group_name=model_package_group_name,
approval_status=model_approval_status,
model_metrics=model_metrics,
)
step_register = ModelStep(
name="RegisterAbaloneModel",
step_args=step_args,
)
# condition step for evaluating model quality and branching execution
cond_lte = ConditionLessThanOrEqualTo(
left=JsonGet(
step_name=step_eval.name,
property_file=evaluation_report,
json_path="regression_metrics.mse.value",
),
right=6.0,
)
step_cond = ConditionStep(
name="CheckMSEAbaloneEvaluation",
conditions=[cond_lte],
if_steps=[step_register],
else_steps=[],
)
# pipeline instance
pipeline = Pipeline(
name=pipeline_name,
parameters=[
processing_instance_type,
processing_instance_count,
training_instance_type,
model_approval_status,
input_data,
],
steps=[step_process, step_train, step_eval, step_cond],
sagemaker_session=pipeline_session,
)
return pipeline, role
def upsert(region="us-east-2"):
pipeline, role = get_pipeline(region=region)
pipeline.upsert(
role_arn=role,
description=role,
)
if __name__ == "__main__":
upsert()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/ml/__init__.py | modules/service-catalog/seed_code/mlops_app/ml/__init__.py | # __init__.py
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/ml/scripts/__init__.py | modules/service-catalog/seed_code/mlops_app/ml/scripts/__init__.py | # __init__.py
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/ml/scripts/evaluate.py | modules/service-catalog/seed_code/mlops_app/ml/scripts/evaluate.py | """Evaluation script for measuring mean squared error."""
import json
import logging
import pathlib
import pickle
import tarfile
import numpy as np
import pandas as pd
import xgboost
from sklearn.metrics import mean_squared_error
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == "__main__":
logger.debug("Starting evaluation.")
model_path = "/opt/ml/processing/model/model.tar.gz"
with tarfile.open(model_path) as tar:
tar.extractall(path=".")
logger.debug("Loading xgboost model.")
model = pickle.load(open("xgboost-model", "rb"))
logger.debug("Reading test data.")
test_path = "/opt/ml/processing/test/test.csv"
df = pd.read_csv(test_path, header=None)
logger.debug("Reading test data.")
y_test = df.iloc[:, 0].to_numpy()
df.drop(df.columns[0], axis=1, inplace=True)
X_test = xgboost.DMatrix(df.values)
logger.info("Performing predictions against test data.")
predictions = model.predict(X_test)
logger.debug("Calculating mean squared error.")
mse = mean_squared_error(y_test, predictions)
std = np.std(y_test - predictions)
report_dict = {
"regression_metrics": {
"mse": {
"value": mse,
"standard_deviation": std,
},
},
}
output_dir = "/opt/ml/processing/evaluation"
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
logger.info("Writing out evaluation report with mse: %f", mse)
evaluation_path = f"{output_dir}/evaluation.json"
with open(evaluation_path, "w") as f:
f.write(json.dumps(report_dict))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/ml/scripts/preprocess.py | modules/service-catalog/seed_code/mlops_app/ml/scripts/preprocess.py | """Feature engineers the abalone dataset."""
import argparse
import logging
import os
import pathlib
import boto3
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
"sex",
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight",
]
label_column = "rings"
feature_columns_dtype = {
"sex": str,
"length": np.float64,
"diameter": np.float64,
"height": np.float64,
"whole_weight": np.float64,
"shucked_weight": np.float64,
"viscera_weight": np.float64,
"shell_weight": np.float64,
}
label_column_dtype = {"rings": np.float64}
def merge_two_dicts(x, y):
"""Merges two dicts, returning a new copy."""
z = x.copy()
z.update(y)
return z
if __name__ == "__main__":
logger.debug("Starting preprocessing.")
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str, required=True)
args = parser.parse_args()
base_dir = "/opt/ml/processing"
pathlib.Path(f"{base_dir}/data").mkdir(parents=True, exist_ok=True)
input_data = args.input_data
bucket = input_data.split("/")[2]
key = "/".join(input_data.split("/")[3:])
logger.info("Downloading data from bucket: %s, key: %s", bucket, key)
fn = f"{base_dir}/data/abalone-dataset.csv"
s3 = boto3.resource("s3")
s3.Bucket(bucket).download_file(key, fn)
logger.debug("Reading downloaded data.")
df = pd.read_csv(
fn,
header=None,
names=feature_columns_names + [label_column],
dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype),
)
os.unlink(fn)
logger.debug("Defining transformers.")
numeric_features = list(feature_columns_names)
numeric_features.remove("sex")
numeric_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="median")),
("scaler", StandardScaler()),
],
)
categorical_features = ["sex"]
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
],
)
preprocess = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
)
logger.info("Applying transforms.")
y = df.pop("rings")
X_pre = preprocess.fit_transform(df)
y_pre = y.to_numpy().reshape(len(y), 1)
X = np.concatenate((y_pre, X_pre), axis=1)
logger.info("Splitting %d rows of data into train, validation, test datasets.", len(X))
np.random.shuffle(X)
train, validation, test = np.split(X, [int(0.7 * len(X)), int(0.85 * len(X))])
logger.info("Writing out datasets to %s.", base_dir)
pd.DataFrame(train).to_csv(f"{base_dir}/train/train.csv", header=False, index=False)
pd.DataFrame(validation).to_csv(
f"{base_dir}/validation/validation.csv",
header=False,
index=False,
)
pd.DataFrame(test).to_csv(f"{base_dir}/test/test.csv", header=False, index=False)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/pipeline.py | modules/service-catalog/seed_code/mlops_app/infra/pipeline.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Dict, cast
import yaml
from aws_cdk import Aws, Stack, Stage, pipelines
from aws_cdk import aws_codebuild as codebuild
from aws_cdk import aws_codecommit as codecommit
from aws_cdk import aws_codepipeline as codepipeline
from aws_cdk import aws_iam as iam
from aws_cdk import aws_s3 as s3
from constructs import Construct
from notifications.notifications_stack import NotificationsStack
class PipelineStack(Stack):
def __init__(
self,
scope: Construct,
construct_id: str,
code_repository_name: str,
sagemaker_project_name: str,
sagemaker_project_id: str,
model_package_group_name: str,
project_short_name: str,
env_name: str,
**kwargs: Any,
) -> None:
super().__init__(scope, construct_id, **kwargs)
source = pipelines.CodePipelineSource.code_commit(
repository=codecommit.Repository.from_repository_name(
self,
f"source-repo-{sagemaker_project_name}-{sagemaker_project_id}",
repository_name=code_repository_name,
),
branch="main",
)
code_build_role = iam.Role(
self,
f"codebuild-{sagemaker_project_name}-{sagemaker_project_id}",
role_name=f"CodeBuildRole-{sagemaker_project_name}",
assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("PowerUserAccess"),
iam.ManagedPolicy.from_aws_managed_policy_name("IAMFullAccess"),
],
)
# TODO narrow down
sm_role = iam.Role(
self,
f"sm-role-{sagemaker_project_name}-{sagemaker_project_id}",
role_name=f"sm-role-{sagemaker_project_name}-{sagemaker_project_id}",
assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name(
"AmazonSageMakerFullAccess",
),
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess"),
],
)
artifact_bucket_arn = self.node.try_get_context("artifact_bucket_arn")
# use default value for s3 bucket if not provided through 'cdk.json' file
if not artifact_bucket_arn:
artifact_bucket_arn = f"arn:{self.partition}:s3:::sagemaker-{Stack.of(self).region}-{Stack.of(self).account}"
artifact_bucket = s3.Bucket.from_bucket_arn(
self,
"code-pipeline-artifacts-bucket",
artifact_bucket_arn,
)
codepipeline_props = codepipeline.Pipeline(
self,
"CodepipelineProperty",
artifact_bucket=artifact_bucket,
pipeline_name=f"{project_short_name}-pipeline-{env_name}",
)
self.pipeline = pipelines.CodePipeline(
self,
f"{sagemaker_project_name}-{sagemaker_project_id}-pipeline",
code_pipeline=codepipeline_props,
publish_assets_in_parallel=False,
self_mutation=True,
synth=pipelines.CodeBuildStep(
"Synth",
input=source,
build_environment=codebuild.BuildEnvironment(
build_image=codebuild.LinuxBuildImage.STANDARD_5_0,
privileged=False,
),
commands=[
"cd infra",
"pip install -r requirements.txt",
"npm install -g aws-cdk",
'cdk synth --app "python app.py"',
],
role=code_build_role,
primary_output_directory="infra/cdk.out",
),
)
notification_stage_construct = NotificationStage(
self,
f"{sagemaker_project_name}-{sagemaker_project_id}-notifications-stage",
sagemaker_project_name,
sagemaker_project_id,
model_package_group_name,
project_short_name=project_short_name,
env_name=env_name,
)
notification_stage = self.pipeline.add_stage(notification_stage_construct)
sm_pipelines_buildspec = self.convert_yaml_to_json("../buildspec.yaml")
notification_stage.add_post(
pipelines.CodeBuildStep(
"SageMakerPipeline.Upsert",
input=source,
commands=[],
build_environment=codebuild.BuildEnvironment(
build_image=codebuild.LinuxBuildImage.STANDARD_5_0,
environment_variables={
"SAGEMAKER_PROJECT_NAME": codebuild.BuildEnvironmentVariable(
value=sagemaker_project_name,
),
"SAGEMAKER_PROJECT_ID": codebuild.BuildEnvironmentVariable(
value=sagemaker_project_id,
),
"SAGEMAKER_PIPELINE_ROLE_ARN": codebuild.BuildEnvironmentVariable(
value=sm_role.role_arn,
),
"AWS_REGION": codebuild.BuildEnvironmentVariable(
value=Aws.REGION,
),
},
),
partial_build_spec=codebuild.BuildSpec.from_object(
sm_pipelines_buildspec,
),
role=code_build_role,
),
)
artifact_bucket.grant_read_write(code_build_role)
def convert_yaml_to_json(self, file_name: str) -> Dict[Any, Any]:
with open(file_name, "r") as buildspec_yaml:
return cast(Dict[Any, Any], yaml.safe_load(buildspec_yaml))
class NotificationStage(Stage):
def __init__(
self,
scope: Construct,
construct_id: str,
sagemaker_project_name: str,
sagemaker_project_id: str,
model_package_group_name: str,
project_short_name: str,
env_name: str,
**kwargs: Any,
) -> None:
super().__init__(scope, construct_id, **kwargs)
self.notification_stack = NotificationsStack(
self,
f"{sagemaker_project_name}-{sagemaker_project_id}-notif-stack",
sagemaker_project_name=sagemaker_project_name,
sagemaker_project_id=sagemaker_project_id,
model_package_group_name=model_package_group_name,
project_short_name=project_short_name,
env_name=env_name,
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/__init__.py | modules/service-catalog/seed_code/mlops_app/infra/__init__.py | # __init__.py
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/app.py | modules/service-catalog/seed_code/mlops_app/infra/app.py | #!/usr/bin/env python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
from typing import cast
import aws_cdk as cdk
import boto3
from pipeline import PipelineStack
config_file = open("../.sagemaker-code-config")
sagemaker_code_config = json.load(config_file)
sagemaker_project_name = sagemaker_code_config["sagemakerProjectName"]
sagemaker_project_id = sagemaker_code_config["sagemakerProjectId"]
sagemaker_repository_name = sagemaker_code_config["codeRepositoryName"]
sagemaker_pipeline_name = sagemaker_code_config["sagemakerPipelineName"]
project_short_name = sagemaker_code_config["projectShortName"]
env_name = "dev"
model_package_group_name = f"{sagemaker_pipeline_name}-{env_name}-models"
def get_account() -> str:
if "CDK_DEFAULT_ACCOUNT" in os.environ:
return os.environ["CDK_DEFAULT_ACCOUNT"]
return cast(str, boto3.client(service_name="sts").get_caller_identity().get("Account"))
def get_region() -> str:
if "CDK_DEFAULT_REGION" in os.environ:
return os.environ["CDK_DEFAULT_REGION"]
session = boto3.Session()
if session.region_name is None:
raise ValueError(
"It is not possible to infer AWS REGION from your environment. Please pass the --region argument.",
)
return str(session.region_name)
app = cdk.App()
PipelineStack(
app,
f"mlops-pipeline-{sagemaker_project_name}-{sagemaker_project_id}",
code_repository_name=sagemaker_repository_name,
sagemaker_project_name=sagemaker_project_name,
sagemaker_project_id=sagemaker_project_id,
model_package_group_name=model_package_group_name,
project_short_name=project_short_name,
env_name=env_name,
env=cdk.Environment(
account=get_account(),
region=get_region(),
),
)
app.synth()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.