repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/views.py | abstract-user-example/users/views.py | from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
class SignUp(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy("login")
template_name = "signup.html"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/admin.py | abstract-user-example/users/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ("email", "is_staff", "is_active",)
list_filter = ("email", "is_staff", "is_active",)
fieldsets = (
(None, {"fields": ("email", "password")}),
("Permissions", {"fields": ("is_staff", "is_active", "groups", "user_permissions")}),
)
add_fieldsets = (
(None, {
"classes": ("wide",),
"fields": (
"email", "password1", "password2", "is_staff",
"is_active", "groups", "user_permissions"
)}
),
)
search_fields = ("email",)
ordering = ("email",)
admin.site.register(CustomUser, CustomUserAdmin)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/models.py | abstract-user-example/users/models.py | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractUser):
username = None
email = models.EmailField(_("email address"), unique=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/managers.py | abstract-user-example/users/managers.py | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import gettext_lazy as _
class CustomUserManager(BaseUserManager):
"""
Custom user model manager where email is the unique identifiers
for authentication instead of usernames.
"""
def create_user(self, email, password, **extra_fields):
"""
Create and save a user with the given email and password.
"""
if not email:
raise ValueError(_("The Email must be set"))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
"""
Create and save a SuperUser with the given email and password.
"""
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
extra_fields.setdefault("is_active", True)
if extra_fields.get("is_staff") is not True:
raise ValueError(_("Superuser must have is_staff=True."))
if extra_fields.get("is_superuser") is not True:
raise ValueError(_("Superuser must have is_superuser=True."))
return self.create_user(email, password, **extra_fields)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/__init__.py | abstract-user-example/users/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/tests.py | abstract-user-example/users/tests.py | from django.contrib.auth import get_user_model
from django.test import TestCase
class UsersManagersTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(email="normal@user.com", password="foo")
self.assertEqual(user.email, "normal@user.com")
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(user.username)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email="")
with self.assertRaises(ValueError):
User.objects.create_user(email="", password="foo")
def test_create_superuser(self):
User = get_user_model()
admin_user = User.objects.create_superuser(email="super@user.com", password="foo")
self.assertEqual(admin_user.email, "super@user.com")
self.assertTrue(admin_user.is_active)
self.assertTrue(admin_user.is_staff)
self.assertTrue(admin_user.is_superuser)
try:
# username is None for the AbstractUser option
# username does not exist for the AbstractBaseUser option
self.assertIsNone(admin_user.username)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_superuser(
email="super@user.com", password="foo", is_superuser=False)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/apps.py | abstract-user-example/users/apps.py | from django.apps import AppConfig
class UsersConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "users"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/forms.py | abstract-user-example/users/forms.py | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = ("email",)
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ("email",)
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/urls.py | abstract-user-example/users/urls.py | from django.urls import path
from . import views
urlpatterns = [path("signup/", views.SignUp.as_view(), name="signup"), ]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/migrations/0001_initial.py | abstract-user-example/users/migrations/0001_initial.py | # Generated by Django 4.1.5 on 2023-01-21 20:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="CustomUser",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="date joined"
),
),
(
"email",
models.EmailField(
max_length=254, unique=True, verbose_name="email address"
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.permission",
verbose_name="user permissions",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
"abstract": False,
},
),
]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/users/migrations/__init__.py | abstract-user-example/users/migrations/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/hello_django/asgi.py | abstract-user-example/hello_django/asgi.py | """
ASGI config for hello_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
application = get_asgi_application()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/hello_django/settings.py | abstract-user-example/hello_django/settings.py | """
Django settings for hello_django project.
Generated by 'django-admin startproject' using Django 4.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-epsx)bwj9$%c+fdwjfr!bw&4!6#tu+9@-lov*dh1xj@!i)t$z^"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"users",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "hello_django.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR / "templates",
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "hello_django.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.1/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.1/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
AUTH_USER_MODEL = "users.CustomUser"
LOGIN_REDIRECT_URL = "home"
LOGOUT_REDIRECT_URL = "home"
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/hello_django/__init__.py | abstract-user-example/hello_django/__init__.py | python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false | |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/hello_django/wsgi.py | abstract-user-example/hello_django/wsgi.py | """
WSGI config for hello_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_django.settings")
application = get_wsgi_application()
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
testdrivenio/django-custom-user-model | https://github.com/testdrivenio/django-custom-user-model/blob/1136e836d233d4c8d76564826d77a567d4482491/abstract-user-example/hello_django/urls.py | abstract-user-example/hello_django/urls.py | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
urlpatterns = [
path("", TemplateView.as_view(template_name="home.html"), name="home"),
path("admin/", admin.site.urls),
path("users/", include("users.urls")),
path("users/", include("django.contrib.auth.urls")),
]
| python | MIT | 1136e836d233d4c8d76564826d77a567d4482491 | 2026-01-05T07:12:30.363478Z | false |
deactivated/python-iso3166 | https://github.com/deactivated/python-iso3166/blob/5e88a392ecd1eff8d3587ac60255d1f2545e7f83/setup.py | setup.py | from setuptools import setup
setup()
| python | MIT | 5e88a392ecd1eff8d3587ac60255d1f2545e7f83 | 2026-01-05T07:12:35.548515Z | false |
deactivated/python-iso3166 | https://github.com/deactivated/python-iso3166/blob/5e88a392ecd1eff8d3587ac60255d1f2545e7f83/src/iso3166/__init__.py | src/iso3166/__init__.py | # -*- coding: utf-8 -*-
import re
from typing import Dict, Iterator, NamedTuple, Type, TypeVar, Union, overload
__all__ = ["countries"]
StrOrInt = Union[str, int]
_D = TypeVar("_D")
class Country(NamedTuple):
name: str
alpha2: str
alpha3: str
numeric: str
apolitical_name: str
_records = [
Country("Afghanistan", "AF", "AFG", "004", "Afghanistan"),
Country("Åland Islands", "AX", "ALA", "248", "Åland Islands"),
Country("Albania", "AL", "ALB", "008", "Albania"),
Country("Algeria", "DZ", "DZA", "012", "Algeria"),
Country("American Samoa", "AS", "ASM", "016", "American Samoa"),
Country("Andorra", "AD", "AND", "020", "Andorra"),
Country("Angola", "AO", "AGO", "024", "Angola"),
Country("Anguilla", "AI", "AIA", "660", "Anguilla"),
Country("Antarctica", "AQ", "ATA", "010", "Antarctica"),
Country("Antigua and Barbuda", "AG", "ATG", "028", "Antigua and Barbuda"),
Country("Argentina", "AR", "ARG", "032", "Argentina"),
Country("Armenia", "AM", "ARM", "051", "Armenia"),
Country("Aruba", "AW", "ABW", "533", "Aruba"),
Country("Australia", "AU", "AUS", "036", "Australia"),
Country("Austria", "AT", "AUT", "040", "Austria"),
Country("Azerbaijan", "AZ", "AZE", "031", "Azerbaijan"),
Country("Bahamas", "BS", "BHS", "044", "Bahamas"),
Country("Bahrain", "BH", "BHR", "048", "Bahrain"),
Country("Bangladesh", "BD", "BGD", "050", "Bangladesh"),
Country("Barbados", "BB", "BRB", "052", "Barbados"),
Country("Belarus", "BY", "BLR", "112", "Belarus"),
Country("Belgium", "BE", "BEL", "056", "Belgium"),
Country("Belize", "BZ", "BLZ", "084", "Belize"),
Country("Benin", "BJ", "BEN", "204", "Benin"),
Country("Bermuda", "BM", "BMU", "060", "Bermuda"),
Country("Bhutan", "BT", "BTN", "064", "Bhutan"),
Country(
"Bolivia, Plurinational State of",
"BO",
"BOL",
"068",
"Bolivia, Plurinational State of",
),
Country(
"Bonaire, Sint Eustatius and Saba",
"BQ",
"BES",
"535",
"Bonaire, Sint Eustatius and Saba",
),
Country(
"Bosnia and Herzegovina", "BA", "BIH", "070", "Bosnia and Herzegovina"
),
Country("Botswana", "BW", "BWA", "072", "Botswana"),
Country("Bouvet Island", "BV", "BVT", "074", "Bouvet Island"),
Country("Brazil", "BR", "BRA", "076", "Brazil"),
Country(
"British Indian Ocean Territory",
"IO",
"IOT",
"086",
"British Indian Ocean Territory",
),
Country("Brunei Darussalam", "BN", "BRN", "096", "Brunei Darussalam"),
Country("Bulgaria", "BG", "BGR", "100", "Bulgaria"),
Country("Burkina Faso", "BF", "BFA", "854", "Burkina Faso"),
Country("Burundi", "BI", "BDI", "108", "Burundi"),
Country("Cambodia", "KH", "KHM", "116", "Cambodia"),
Country("Cameroon", "CM", "CMR", "120", "Cameroon"),
Country("Canada", "CA", "CAN", "124", "Canada"),
Country("Cabo Verde", "CV", "CPV", "132", "Cabo Verde"),
Country("Cayman Islands", "KY", "CYM", "136", "Cayman Islands"),
Country(
"Central African Republic",
"CF",
"CAF",
"140",
"Central African Republic",
),
Country("Chad", "TD", "TCD", "148", "Chad"),
Country("Chile", "CL", "CHL", "152", "Chile"),
Country("China", "CN", "CHN", "156", "China"),
Country("Christmas Island", "CX", "CXR", "162", "Christmas Island"),
Country(
"Cocos (Keeling) Islands",
"CC",
"CCK",
"166",
"Cocos (Keeling) Islands",
),
Country("Colombia", "CO", "COL", "170", "Colombia"),
Country("Comoros", "KM", "COM", "174", "Comoros"),
Country("Congo", "CG", "COG", "178", "Congo"),
Country(
"Congo, Democratic Republic of the",
"CD",
"COD",
"180",
"Congo, Democratic Republic of the",
),
Country("Cook Islands", "CK", "COK", "184", "Cook Islands"),
Country("Costa Rica", "CR", "CRI", "188", "Costa Rica"),
Country("Côte d'Ivoire", "CI", "CIV", "384", "Côte d'Ivoire"),
Country("Croatia", "HR", "HRV", "191", "Croatia"),
Country("Cuba", "CU", "CUB", "192", "Cuba"),
Country("Curaçao", "CW", "CUW", "531", "Curaçao"),
Country("Cyprus", "CY", "CYP", "196", "Cyprus"),
Country("Czechia", "CZ", "CZE", "203", "Czechia"),
Country("Denmark", "DK", "DNK", "208", "Denmark"),
Country("Djibouti", "DJ", "DJI", "262", "Djibouti"),
Country("Dominica", "DM", "DMA", "212", "Dominica"),
Country("Dominican Republic", "DO", "DOM", "214", "Dominican Republic"),
Country("Ecuador", "EC", "ECU", "218", "Ecuador"),
Country("Egypt", "EG", "EGY", "818", "Egypt"),
Country("El Salvador", "SV", "SLV", "222", "El Salvador"),
Country("Equatorial Guinea", "GQ", "GNQ", "226", "Equatorial Guinea"),
Country("Eritrea", "ER", "ERI", "232", "Eritrea"),
Country("Estonia", "EE", "EST", "233", "Estonia"),
Country("Ethiopia", "ET", "ETH", "231", "Ethiopia"),
Country(
"Falkland Islands (Malvinas)",
"FK",
"FLK",
"238",
"Falkland Islands (Malvinas)",
),
Country("Faroe Islands", "FO", "FRO", "234", "Faroe Islands"),
Country("Fiji", "FJ", "FJI", "242", "Fiji"),
Country("Finland", "FI", "FIN", "246", "Finland"),
Country("France", "FR", "FRA", "250", "France"),
Country("French Guiana", "GF", "GUF", "254", "French Guiana"),
Country("French Polynesia", "PF", "PYF", "258", "French Polynesia"),
Country(
"French Southern Territories",
"TF",
"ATF",
"260",
"French Southern Territories",
),
Country("Gabon", "GA", "GAB", "266", "Gabon"),
Country("Gambia", "GM", "GMB", "270", "Gambia"),
Country("Georgia", "GE", "GEO", "268", "Georgia"),
Country("Germany", "DE", "DEU", "276", "Germany"),
Country("Ghana", "GH", "GHA", "288", "Ghana"),
Country("Gibraltar", "GI", "GIB", "292", "Gibraltar"),
Country("Greece", "GR", "GRC", "300", "Greece"),
Country("Greenland", "GL", "GRL", "304", "Greenland"),
Country("Grenada", "GD", "GRD", "308", "Grenada"),
Country("Guadeloupe", "GP", "GLP", "312", "Guadeloupe"),
Country("Guam", "GU", "GUM", "316", "Guam"),
Country("Guatemala", "GT", "GTM", "320", "Guatemala"),
Country("Guernsey", "GG", "GGY", "831", "Guernsey"),
Country("Guinea", "GN", "GIN", "324", "Guinea"),
Country("Guinea-Bissau", "GW", "GNB", "624", "Guinea-Bissau"),
Country("Guyana", "GY", "GUY", "328", "Guyana"),
Country("Haiti", "HT", "HTI", "332", "Haiti"),
Country(
"Heard Island and McDonald Islands",
"HM",
"HMD",
"334",
"Heard Island and McDonald Islands",
),
Country("Holy See", "VA", "VAT", "336", "Holy See"),
Country("Honduras", "HN", "HND", "340", "Honduras"),
Country("Hong Kong", "HK", "HKG", "344", "Hong Kong"),
Country("Hungary", "HU", "HUN", "348", "Hungary"),
Country("Iceland", "IS", "ISL", "352", "Iceland"),
Country("India", "IN", "IND", "356", "India"),
Country("Indonesia", "ID", "IDN", "360", "Indonesia"),
Country(
"Iran, Islamic Republic of",
"IR",
"IRN",
"364",
"Iran, Islamic Republic of",
),
Country("Iraq", "IQ", "IRQ", "368", "Iraq"),
Country("Ireland", "IE", "IRL", "372", "Ireland"),
Country("Isle of Man", "IM", "IMN", "833", "Isle of Man"),
Country("Israel", "IL", "ISR", "376", "Israel"),
Country("Italy", "IT", "ITA", "380", "Italy"),
Country("Jamaica", "JM", "JAM", "388", "Jamaica"),
Country("Japan", "JP", "JPN", "392", "Japan"),
Country("Jersey", "JE", "JEY", "832", "Jersey"),
Country("Jordan", "JO", "JOR", "400", "Jordan"),
Country("Kazakhstan", "KZ", "KAZ", "398", "Kazakhstan"),
Country("Kenya", "KE", "KEN", "404", "Kenya"),
Country("Kiribati", "KI", "KIR", "296", "Kiribati"),
Country(
"Korea, Democratic People's Republic of",
"KP",
"PRK",
"408",
"Korea, Democratic People's Republic of",
),
Country("Korea, Republic of", "KR", "KOR", "410", "Korea, Republic of"),
Country("Kosovo", "XK", "XKX", "983", "Kosovo"),
Country("Kuwait", "KW", "KWT", "414", "Kuwait"),
Country("Kyrgyzstan", "KG", "KGZ", "417", "Kyrgyzstan"),
Country(
"Lao People's Democratic Republic",
"LA",
"LAO",
"418",
"Lao People's Democratic Republic",
),
Country("Latvia", "LV", "LVA", "428", "Latvia"),
Country("Lebanon", "LB", "LBN", "422", "Lebanon"),
Country("Lesotho", "LS", "LSO", "426", "Lesotho"),
Country("Liberia", "LR", "LBR", "430", "Liberia"),
Country("Libya", "LY", "LBY", "434", "Libya"),
Country("Liechtenstein", "LI", "LIE", "438", "Liechtenstein"),
Country("Lithuania", "LT", "LTU", "440", "Lithuania"),
Country("Luxembourg", "LU", "LUX", "442", "Luxembourg"),
Country("Macao", "MO", "MAC", "446", "Macao"),
Country("North Macedonia", "MK", "MKD", "807", "North Macedonia"),
Country("Madagascar", "MG", "MDG", "450", "Madagascar"),
Country("Malawi", "MW", "MWI", "454", "Malawi"),
Country("Malaysia", "MY", "MYS", "458", "Malaysia"),
Country("Maldives", "MV", "MDV", "462", "Maldives"),
Country("Mali", "ML", "MLI", "466", "Mali"),
Country("Malta", "MT", "MLT", "470", "Malta"),
Country("Marshall Islands", "MH", "MHL", "584", "Marshall Islands"),
Country("Martinique", "MQ", "MTQ", "474", "Martinique"),
Country("Mauritania", "MR", "MRT", "478", "Mauritania"),
Country("Mauritius", "MU", "MUS", "480", "Mauritius"),
Country("Mayotte", "YT", "MYT", "175", "Mayotte"),
Country("Mexico", "MX", "MEX", "484", "Mexico"),
Country(
"Micronesia, Federated States of",
"FM",
"FSM",
"583",
"Micronesia, Federated States of",
),
Country(
"Moldova, Republic of", "MD", "MDA", "498", "Moldova, Republic of"
),
Country("Monaco", "MC", "MCO", "492", "Monaco"),
Country("Mongolia", "MN", "MNG", "496", "Mongolia"),
Country("Montenegro", "ME", "MNE", "499", "Montenegro"),
Country("Montserrat", "MS", "MSR", "500", "Montserrat"),
Country("Morocco", "MA", "MAR", "504", "Morocco"),
Country("Mozambique", "MZ", "MOZ", "508", "Mozambique"),
Country("Myanmar", "MM", "MMR", "104", "Myanmar"),
Country("Namibia", "NA", "NAM", "516", "Namibia"),
Country("Nauru", "NR", "NRU", "520", "Nauru"),
Country("Nepal", "NP", "NPL", "524", "Nepal"),
Country("Netherlands", "NL", "NLD", "528", "Netherlands"),
Country("New Caledonia", "NC", "NCL", "540", "New Caledonia"),
Country("New Zealand", "NZ", "NZL", "554", "New Zealand"),
Country("Nicaragua", "NI", "NIC", "558", "Nicaragua"),
Country("Niger", "NE", "NER", "562", "Niger"),
Country("Nigeria", "NG", "NGA", "566", "Nigeria"),
Country("Niue", "NU", "NIU", "570", "Niue"),
Country("Norfolk Island", "NF", "NFK", "574", "Norfolk Island"),
Country(
"Northern Mariana Islands",
"MP",
"MNP",
"580",
"Northern Mariana Islands",
),
Country("Norway", "NO", "NOR", "578", "Norway"),
Country("Oman", "OM", "OMN", "512", "Oman"),
Country("Pakistan", "PK", "PAK", "586", "Pakistan"),
Country("Palau", "PW", "PLW", "585", "Palau"),
Country("Palestine, State of", "PS", "PSE", "275", "Palestine"),
Country("Panama", "PA", "PAN", "591", "Panama"),
Country("Papua New Guinea", "PG", "PNG", "598", "Papua New Guinea"),
Country("Paraguay", "PY", "PRY", "600", "Paraguay"),
Country("Peru", "PE", "PER", "604", "Peru"),
Country("Philippines", "PH", "PHL", "608", "Philippines"),
Country("Pitcairn", "PN", "PCN", "612", "Pitcairn"),
Country("Poland", "PL", "POL", "616", "Poland"),
Country("Portugal", "PT", "PRT", "620", "Portugal"),
Country("Puerto Rico", "PR", "PRI", "630", "Puerto Rico"),
Country("Qatar", "QA", "QAT", "634", "Qatar"),
Country("Réunion", "RE", "REU", "638", "Réunion"),
Country("Romania", "RO", "ROU", "642", "Romania"),
Country("Russian Federation", "RU", "RUS", "643", "Russian Federation"),
Country("Rwanda", "RW", "RWA", "646", "Rwanda"),
Country("Saint Barthélemy", "BL", "BLM", "652", "Saint Barthélemy"),
Country(
"Saint Helena, Ascension and Tristan da Cunha",
"SH",
"SHN",
"654",
"Saint Helena, Ascension and Tristan da Cunha",
),
Country(
"Saint Kitts and Nevis", "KN", "KNA", "659", "Saint Kitts and Nevis"
),
Country("Saint Lucia", "LC", "LCA", "662", "Saint Lucia"),
Country(
"Saint Martin (French part)",
"MF",
"MAF",
"663",
"Saint Martin (French part)",
),
Country(
"Saint Pierre and Miquelon",
"PM",
"SPM",
"666",
"Saint Pierre and Miquelon",
),
Country(
"Saint Vincent and the Grenadines",
"VC",
"VCT",
"670",
"Saint Vincent and the Grenadines",
),
Country("Samoa", "WS", "WSM", "882", "Samoa"),
Country("San Marino", "SM", "SMR", "674", "San Marino"),
Country(
"Sao Tome and Principe", "ST", "STP", "678", "Sao Tome and Principe"
),
Country("Saudi Arabia", "SA", "SAU", "682", "Saudi Arabia"),
Country("Senegal", "SN", "SEN", "686", "Senegal"),
Country("Serbia", "RS", "SRB", "688", "Serbia"),
Country("Seychelles", "SC", "SYC", "690", "Seychelles"),
Country("Sierra Leone", "SL", "SLE", "694", "Sierra Leone"),
Country("Singapore", "SG", "SGP", "702", "Singapore"),
Country(
"Sint Maarten (Dutch part)",
"SX",
"SXM",
"534",
"Sint Maarten (Dutch part)",
),
Country("Slovakia", "SK", "SVK", "703", "Slovakia"),
Country("Slovenia", "SI", "SVN", "705", "Slovenia"),
Country("Solomon Islands", "SB", "SLB", "090", "Solomon Islands"),
Country("Somalia", "SO", "SOM", "706", "Somalia"),
Country("South Africa", "ZA", "ZAF", "710", "South Africa"),
Country(
"South Georgia and the South Sandwich Islands",
"GS",
"SGS",
"239",
"South Georgia and the South Sandwich Islands",
),
Country("South Sudan", "SS", "SSD", "728", "South Sudan"),
Country("Spain", "ES", "ESP", "724", "Spain"),
Country("Sri Lanka", "LK", "LKA", "144", "Sri Lanka"),
Country("Sudan", "SD", "SDN", "729", "Sudan"),
Country("Suriname", "SR", "SUR", "740", "Suriname"),
Country(
"Svalbard and Jan Mayen", "SJ", "SJM", "744", "Svalbard and Jan Mayen"
),
Country("Eswatini", "SZ", "SWZ", "748", "Eswatini"),
Country("Sweden", "SE", "SWE", "752", "Sweden"),
Country("Switzerland", "CH", "CHE", "756", "Switzerland"),
Country(
"Syrian Arab Republic", "SY", "SYR", "760", "Syrian Arab Republic"
),
Country("Taiwan, Province of China", "TW", "TWN", "158", "Taiwan"),
Country("Tajikistan", "TJ", "TJK", "762", "Tajikistan"),
Country(
"Tanzania, United Republic of",
"TZ",
"TZA",
"834",
"Tanzania, United Republic of",
),
Country("Thailand", "TH", "THA", "764", "Thailand"),
Country("Timor-Leste", "TL", "TLS", "626", "Timor-Leste"),
Country("Togo", "TG", "TGO", "768", "Togo"),
Country("Tokelau", "TK", "TKL", "772", "Tokelau"),
Country("Tonga", "TO", "TON", "776", "Tonga"),
Country("Trinidad and Tobago", "TT", "TTO", "780", "Trinidad and Tobago"),
Country("Tunisia", "TN", "TUN", "788", "Tunisia"),
Country("Türkiye", "TR", "TUR", "792", "Türkiye"),
Country("Turkmenistan", "TM", "TKM", "795", "Turkmenistan"),
Country(
"Turks and Caicos Islands",
"TC",
"TCA",
"796",
"Turks and Caicos Islands",
),
Country("Tuvalu", "TV", "TUV", "798", "Tuvalu"),
Country("Uganda", "UG", "UGA", "800", "Uganda"),
Country("Ukraine", "UA", "UKR", "804", "Ukraine"),
Country(
"United Arab Emirates", "AE", "ARE", "784", "United Arab Emirates"
),
Country(
"United Kingdom of Great Britain and Northern Ireland",
"GB",
"GBR",
"826",
"United Kingdom of Great Britain and Northern Ireland",
),
Country(
"United States of America",
"US",
"USA",
"840",
"United States of America",
),
Country(
"United States Minor Outlying Islands",
"UM",
"UMI",
"581",
"United States Minor Outlying Islands",
),
Country("Uruguay", "UY", "URY", "858", "Uruguay"),
Country("Uzbekistan", "UZ", "UZB", "860", "Uzbekistan"),
Country("Vanuatu", "VU", "VUT", "548", "Vanuatu"),
Country(
"Venezuela, Bolivarian Republic of",
"VE",
"VEN",
"862",
"Venezuela, Bolivarian Republic of",
),
Country("Viet Nam", "VN", "VNM", "704", "Viet Nam"),
Country(
"Virgin Islands, British",
"VG",
"VGB",
"092",
"Virgin Islands, British",
),
Country(
"Virgin Islands, U.S.", "VI", "VIR", "850", "Virgin Islands, U.S."
),
Country("Wallis and Futuna", "WF", "WLF", "876", "Wallis and Futuna"),
Country("Western Sahara", "EH", "ESH", "732", "Western Sahara"),
Country("Yemen", "YE", "YEM", "887", "Yemen"),
Country("Zambia", "ZM", "ZMB", "894", "Zambia"),
Country("Zimbabwe", "ZW", "ZWE", "716", "Zimbabwe"),
]
def _build_index(idx: int) -> Dict[str, Country]:
return dict((r[idx].upper(), r) for r in _records)
# Internal country indexes
# Each maps an upper-cased key to its Country record; built once at import.
_by_alpha2 = _build_index(1)  # ISO 3166-1 alpha-2 code, e.g. "US"
_by_alpha3 = _build_index(2)  # ISO 3166-1 alpha-3 code, e.g. "USA"
_by_numeric = _build_index(3)  # zero-padded numeric code string, e.g. "840"
_by_name = _build_index(0)  # official name
_by_apolitical_name = _build_index(4)  # apolitical name variant
# Documented accessors for the country indexes
# Public aliases of the private tables above; part of the package API.
countries_by_alpha2 = _by_alpha2
countries_by_alpha3 = _by_alpha3
countries_by_numeric = _by_numeric
countries_by_name = _by_name
countries_by_apolitical_name = _by_apolitical_name
class NotFound:
    """Sentinel type used as the default for ``_CountryLookup.get``.

    A dedicated class lets ``get`` distinguish "no default supplied" from
    any value a caller could legitimately pass (including ``None``).
    """

    pass
class _CountryLookup:
    """Dict-like access to ISO 3166-1 country records.

    A key may be an alpha-2 code, alpha-3 code, numeric code (``int`` or
    3-digit string), official name, or apolitical name; matching is
    case-insensitive.  Supports ``lookup[key]``, ``lookup.get(key[, default])``,
    ``key in lookup``, iteration, and ``len()``.
    """

    @overload
    def get(self, key: StrOrInt) -> Country:
        ...

    @overload
    def get(self, key: StrOrInt, default: _D) -> Union[Country, _D]:
        ...

    def get(
        self, key: StrOrInt, default: Union[Type[NotFound], _D] = NotFound
    ) -> Union[Country, _D]:
        """Return the Country record for *key*.

        Falls back to *default* when the key is unknown; raises ``KeyError``
        if no default was supplied.
        """
        if isinstance(key, int):
            # Integers are numeric codes; normalise to a zero-padded string.
            k = f"{key:03d}"
            r = _by_numeric.get(k, default)
        else:
            k = key.upper()
            if len(k) == 2:
                r = _by_alpha2.get(k, default)
            elif len(k) == 3 and re.match(r"[0-9]{3}", k) and k != "000":
                # Exactly three digits (but never "000") is a numeric code.
                r = _by_numeric.get(k, default)
            elif len(k) == 3:
                r = _by_alpha3.get(k, default)
            elif k in _by_name:
                r = _by_name.get(k, default)
            else:
                r = _by_apolitical_name.get(k, default)
        # Fix: `NotFound` is a sentinel, so test identity with `is` rather
        # than `==` — an `==` comparison could be fooled by a caller-supplied
        # default whose __eq__ compares equal to the NotFound class.
        if r is NotFound:
            raise KeyError(key)
        return r

    __getitem__ = get

    def __len__(self) -> int:
        return len(_records)

    def __iter__(self) -> Iterator[Country]:
        return iter(_records)

    def __contains__(self, item: StrOrInt) -> bool:
        try:
            self.get(item)
            return True
        except KeyError:
            return False
countries = _CountryLookup()
| python | MIT | 5e88a392ecd1eff8d3587ac60255d1f2545e7f83 | 2026-01-05T07:12:35.548515Z | false |
deactivated/python-iso3166 | https://github.com/deactivated/python-iso3166/blob/5e88a392ecd1eff8d3587ac60255d1f2545e7f83/tests/test_listings.py | tests/test_listings.py | # -*- coding: utf-8 -*-
import iso3166
def test_country_list() -> None:
    """The master listing is sizeable and contains only Country records."""
    listing = iso3166.countries
    assert len(listing) > 100
    for entry in listing:
        assert isinstance(entry, iso3166.Country)
def test_by_name() -> None:
    """The official-name index is complete and keyed upper-case."""
    lookup = iso3166.countries_by_name
    assert len(lookup) >= len(iso3166.countries)
    assert lookup["AFGHANISTAN"].name == "Afghanistan"
def test_by_alt_name() -> None:
    """The apolitical-name index is complete and resolves both name forms."""
    lookup = iso3166.countries_by_apolitical_name
    assert len(lookup) >= len(iso3166.countries)
    assert lookup["AFGHANISTAN"].name == "Afghanistan"
    assert lookup["TAIWAN"].apolitical_name == "Taiwan"
def test_by_number() -> None:
    """The numeric index is complete and keyed on zero-padded strings."""
    lookup = iso3166.countries_by_numeric
    assert len(lookup) >= len(iso3166.countries)
    assert lookup["008"].name == "Albania"
def test_by_alpha2() -> None:
    """The alpha-2 index is complete and keyed on two-letter codes."""
    lookup = iso3166.countries_by_alpha2
    assert len(lookup) >= len(iso3166.countries)
    assert lookup["AE"].name == "United Arab Emirates"
def test_by_alpha3() -> None:
    """The alpha-3 index is complete and keyed on three-letter codes."""
    lookup = iso3166.countries_by_alpha3
    assert len(lookup) >= len(iso3166.countries)
    assert lookup["AFG"].name == "Afghanistan"
| python | MIT | 5e88a392ecd1eff8d3587ac60255d1f2545e7f83 | 2026-01-05T07:12:35.548515Z | false |
deactivated/python-iso3166 | https://github.com/deactivated/python-iso3166/blob/5e88a392ecd1eff8d3587ac60255d1f2545e7f83/tests/test_lookup.py | tests/test_lookup.py | # -*- coding: utf-8 -*-
from typing import List, Union
import pytest
import iso3166
from iso3166 import countries
def check_lookup(
    alpha2: str,
    matching_keys: List[Union[int, str]],
    missing_keys: List[Union[int, str]],
) -> None:
    """Assert that every matching key resolves to *alpha2* and that every
    missing key raises KeyError (or yields the explicit default)."""
    for key in matching_keys:
        assert countries[key].alpha2 == alpha2
        assert countries.get(key).alpha2 == alpha2
        assert key in countries
    for key in missing_keys:
        with pytest.raises(KeyError):
            countries.get(key)
        with pytest.raises(KeyError):
            countries[key]
        assert countries.get(key, None) is None
def test_length() -> None:
    """The lookup reports exactly one entry per underlying record."""
    assert len(countries) == len(iso3166._records)
def test_empty_string() -> None:
    """The empty key is rejected while valid alpha-2 keys still resolve."""
    check_lookup("US", ["us", "US"], [""])
def test_alpha2() -> None:
    """Alpha-2 lookup is case-insensitive; unknown codes fail."""
    check_lookup("US", ["us", "US"], ["zz"])
def test_alpha3() -> None:
    """Alpha-3 lookup is case-insensitive; unknown codes fail."""
    check_lookup("US", ["usa", "USA"], ["zzz"])
def test_name() -> None:
    """Full-name lookup is case-insensitive; unknown names fail."""
    good = ["united states of america", "United STates of America"]
    check_lookup("US", good, ["zzzzz"])
def test_numeric() -> None:
    """Numeric lookup accepts int or 3-digit string; "000" is never valid."""
    check_lookup("US", [840, "840"], [111, "111"])
    with pytest.raises(KeyError):
        countries.get("000")
def test_alt_name() -> None:
    """Apolitical and political name variants resolve to the same record."""
    check_lookup("TW", ["taiwan", "Taiwan, province of china"], ["zzzzz"])
    check_lookup("PS", ["palestine", "palestine, state of"], ["zzzz"])
def test_none_default() -> None:
    """An explicit default suppresses the KeyError for unknown keys."""
    assert countries.get("NOTUS", None) is None
def test_data() -> None:
    """Every record is well-formed: code widths, padding, and name lengths."""
    entries = list(countries)
    assert len(entries) > 0
    for entry in entries:
        assert len(entry.alpha2) == 2
        assert entry.alpha2 == entry.alpha2.upper()
        assert len(entry.alpha3) == 3
        assert entry.alpha3 == entry.alpha3.upper()
        assert len(entry.numeric) == 3
        assert entry.numeric == ("%03d" % int(entry.numeric))
        assert int(entry.numeric) > 0
        assert len(entry.name) > 3
        assert len(entry.apolitical_name) > 3
| python | MIT | 5e88a392ecd1eff8d3587ac60255d1f2545e7f83 | 2026-01-05T07:12:35.548515Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/eks-to-opensearch/stack.py | modules/integration/eks-to-opensearch/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from aws_cdk import aws_opensearchservice as opensearch
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class EksOpenSearchIntegrationStack(Stack):
    """CDK stack that connects an existing EKS cluster to an existing
    OpenSearch domain and deploys fluent-bit (via Helm) to ship cluster
    logs into the domain.

    Nothing is created from scratch here except security-group rules, the
    fluent-bit service account (+ IAM policy), and the Helm release; the
    cluster and the domain themselves are imported by attribute.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project: str,
        deployment: str,
        module: str,
        opensearch_sg_id: str,
        opensearch_domain_endpoint: str,
        eks_cluster_name: str,
        eks_admin_role_arn: str,
        eks_cluster_sg_id: str,
        eks_oidc_arn: str,
        **kwargs: Any,
    ) -> None:
        """Import the cluster and domain, open network paths, install fluent-bit.

        The keyword arguments mirror the SeedFarmer module parameters
        (see the accompanying app.py).
        """
        super().__init__(
            scope,
            id,
            description="This stack integrates EKS Cluster with Opensearch cluster",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
        dep_mod = f"{project}-{deployment}-{module}"
        # Truncated to 27 chars — presumably to keep derived resource names
        # within AWS naming limits; TODO confirm the exact constraint.
        dep_mod = dep_mod[:27]
        # Import OpenSearch Domain
        os_domain = opensearch.Domain.from_domain_endpoint(
            self, f"{dep_mod}-os-domain", f"https://{opensearch_domain_endpoint}"
        )
        os_security_group = ec2.SecurityGroup.from_security_group_id(self, f"{dep_mod}-os-sg", opensearch_sg_id)
        # Import EKS Cluster
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_admin_role_arn,
            open_id_connect_provider=provider,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )
        eks_cluster_security_group = ec2.SecurityGroup.from_security_group_id(
            self, f"{dep_mod}-eks-sg", eks_cluster_sg_id
        )
        # Add a rule to allow our new SG to talk to the EKS control plane
        # NOTE(review): both rules below open *all* ports between the two
        # security groups rather than just HTTPS — confirm this breadth is
        # intended.
        eks_cluster_security_group.add_ingress_rule(os_security_group, ec2.Port.all_traffic())
        # Add a rule to allow the EKS control plane to talk to OS SG
        os_security_group.add_ingress_rule(eks_cluster_security_group, ec2.Port.all_traffic())
        # Create the Service Account
        # IRSA service account that the fluent-bit pods will run under.
        fluentbit_service_account = eks_cluster.add_service_account(
            "fluentbit", name="fluentbit", namespace="kube-system"
        )
        # Broad es:* grant, but scoped to this one domain's ARN.
        fluentbit_policy_statement_json_1 = {
            "Effect": "Allow",
            "Action": ["es:*"],
            "Resource": [os_domain.domain_arn],
        }
        # Add the policies to the service account
        fluentbit_service_account.add_to_principal_policy(
            iam.PolicyStatement.from_json(fluentbit_policy_statement_json_1)
        )
        os_domain.grant_write(fluentbit_service_account)
        # For more info check out https://github.com/fluent/helm-charts/tree/main/charts/fluent-bit
        # The `outputs` value is raw fluent-bit INI config: ship everything
        # to the domain over TLS with SigV4 auth, Logstash-style indexes.
        fluentbit_chart = eks_cluster.add_helm_chart(
            "fluentbit",
            chart="fluent-bit",
            version="0.19.17",
            release="fluent-bit",
            repository="https://fluent.github.io/helm-charts",
            namespace="kube-system",
            values={
                "serviceAccount": {"create": False, "name": "fluentbit"},
                "config": {
                    "outputs": "[OUTPUT]\n    Name            es\n    Match           *\n"
                    "    AWS_Region      " + self.region + "\n    AWS_Auth        On\n"
                    "    Host            " + os_domain.domain_endpoint + "\n    Port            443\n"
                    "    TLS             On\n    Replace_Dots    On\n    Logstash_Format    On"
                },
            },
        )
        # Ensure the IRSA role/SA exists before the chart tries to use it.
        fluentbit_chart.node.add_dependency(fluentbit_service_account)
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks(log_ignores=True))
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    id="AwsSolutions-IAM4",
                    reason="Managed Policies are for service account roles only",
                ),
                NagPackSuppression(
                    id="AwsSolutions-IAM5",
                    reason="Resource access restriced to ADDF resources",
                ),
                NagPackSuppression(
                    id="AwsSolutions-L1",
                    reason="Not creating the Lambda directly",
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/eks-to-opensearch/app.py | modules/integration/eks-to-opensearch/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
from aws_cdk import App
from stack import EksOpenSearchIntegrationStack
# SeedFarmer-provided naming context for this deployment.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters; all default to "" — the stack will fail later if they
# are required and missing.
opensearch_sg_id = os.getenv(_param("OPENSEARCH_SG_ID"), "")
opensearch_domain_endpoint = os.getenv(_param("OPENSEARCH_DOMAIN_ENDPOINT"), "")
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")
eks_cluster_sg_id = os.getenv(_param("EKS_CLUSTER_SG_ID"), "")
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")

app = App()

# Single-stack app; the id encodes project/deployment/module for uniqueness.
stack = EksOpenSearchIntegrationStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    opensearch_sg_id=opensearch_sg_id,
    opensearch_domain_endpoint=opensearch_domain_endpoint,
    eks_cluster_name=eks_cluster_name,
    eks_admin_role_arn=eks_admin_role_arn,
    eks_cluster_sg_id=eks_cluster_sg_id,
    eks_oidc_arn=eks_oidc_arn,
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/eks-to-opensearch/tests/test_app.py | modules/integration/eks-to-opensearch/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
from unittest import mock
import pytest
@pytest.fixture(scope="function", autouse=True)
def stack_defaults():
    """Run each test inside a clean environment pre-populated with every
    variable app.py reads, restoring the real environment afterwards."""
    defaults = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME": "test-cluster",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN": "arn:aws:iam::111111111111:role/test-role",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_SG_ID": "sg-xxx",
        "SEEDFARMER_PARAMETER_OPENSEARCH_SG_ID": "sg-yyy",
        "SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_ENDPOINT": "xxxxxxx.us-east-1.es.amazonaws.com",
        "SEEDFARMER_PARAMETER_EKS_OIDC_ARN": (
            "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXX"
        ),
    }
    with mock.patch.dict(os.environ, defaults, clear=True):
        # Unload the app import so that subsequent tests don't reuse it.
        sys.modules.pop("app", None)
        yield
def test_app(stack_defaults):
    """Importing app.py (which synthesises the CDK app) must not raise."""
    import app  # noqa: F401
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/eks-to-opensearch/tests/test_stack.py | modules/integration/eks-to-opensearch/tests/test_stack.py | import aws_cdk as cdk
import cdk_nag
import pytest
from aws_cdk.assertions import Annotations, Match, Template
@pytest.fixture(scope="function")
def app() -> cdk.App:
    """Provide a fresh CDK app for every test."""
    return cdk.App()
@pytest.fixture(scope="function")
def stack(app: cdk.App) -> cdk.Stack:
    """Instantiate the stack under test with representative parameters."""
    from stack import EksOpenSearchIntegrationStack

    proj, dep, mod = "test-project", "test-deployment", "test-module"
    return EksOpenSearchIntegrationStack(
        scope=app,
        id=f"{proj}-{dep}-{mod}",
        project=proj,
        deployment=dep,
        module=mod,
        env=cdk.Environment(
            account="111111111111",
            region="us-east-1",
        ),
        opensearch_sg_id="sg-xxx",
        opensearch_domain_endpoint="xxxxxxx.us-east-1.es.amazonaws.com",
        eks_cluster_name="test-cluster",
        eks_admin_role_arn="arn:aws:iam::111111111111:role/test-role",
        eks_cluster_sg_id="sg-yyy",
        eks_oidc_arn="arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXX",
    )
def test_synthesize_stack(stack: cdk.Stack) -> None:
    """Synthesising the stack to a CloudFormation template must not raise."""
    Template.from_stack(stack)
def test_no_cdk_nag_errors(stack: cdk.Stack) -> None:
    """No unsuppressed AwsSolutions findings may remain on the stack."""
    cdk.Aspects.of(stack).add(cdk_nag.AwsSolutionsChecks())
    findings = Annotations.from_stack(stack).find_error(
        "*",
        Match.string_like_regexp(r"AwsSolutions-.*"),
    )
    assert not findings, f"Found {len(findings)} CDK nag errors"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/eks-to-opensearch/tests/__init__.py | modules/integration/eks-to-opensearch/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/stack.py | modules/integration/ddb-to-opensearch/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, List, cast
import cdk_nag
from aws_cdk import Aspects, Duration, Stack, Tags
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda as lambda_
from aws_cdk.aws_lambda_python_alpha import PythonFunction, PythonLayerVersion
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class DDBtoOpensearch(Stack):
    """CDK stack that streams DynamoDB table changes into OpenSearch.

    Creates a VPC-attached Python Lambda (plus a requests-aws4auth layer)
    that is triggered by the table's DynamoDB stream and indexes each
    record into the OpenSearch domain.  The Lambda's name/ARN are exposed
    as ``lambda_name`` / ``lambda_arn`` for the module's outputs.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project: str,
        deployment: str,
        module: str,
        vpc_id: str,
        private_subnet_ids: List[str],
        opensearch_sg_id: str,
        opensearch_domain_endpoint: str,
        opensearch_domain_name: str,
        ddb_stream_arn: str,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Wire the DynamoDB stream to OpenSearch via a Lambda trigger.

        The keyword arguments mirror the SeedFarmer module parameters
        (see the accompanying app.py).
        """
        super().__init__(scope, id, description=stack_description, **kwargs)
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
        dep_mod = f"{project}-{deployment}-{module}"
        self.vpc_id = vpc_id
        # Context lookup of the existing VPC (needs env account/region set).
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=vpc_id,
        )
        self.private_subnets = []
        for idx, subnet_id in enumerate(private_subnet_ids):
            self.private_subnets.append(ec2.Subnet.from_subnet_id(scope=self, id=f"subnet{idx}", subnet_id=subnet_id))
        os_security_group = ec2.SecurityGroup.from_security_group_id(
            self, f"{dep_mod}-os-sg", opensearch_sg_id, allow_all_outbound=True
        )
        # Allow ingress
        # Self-referencing rule: members of the OpenSearch SG (including the
        # Lambda created below) can reach each other on any port.
        os_security_group.add_ingress_rule(peer=os_security_group, connection=ec2.Port.all_traffic())
        ddb_os_lambda_policy = iam.PolicyDocument(
            statements=[
                # Write access to the OpenSearch domain's HTTP API.
                iam.PolicyStatement(
                    actions=[
                        "es:ESHttpPost",
                        "es:ESHttpPut",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:{self.partition}:es:{self.region}:{self.account}:domain/{opensearch_domain_name}*"
                    ],
                ),
                # Read the DynamoDB stream the Lambda is mapped to.
                iam.PolicyStatement(
                    actions=[
                        "dynamodb:DescribeStream",
                        "dynamodb:GetRecords",
                        "dynamodb:GetShardIterator",
                        "dynamodb:ListStreams",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[ddb_stream_arn],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream",
                        "logs:CreateLogGroup",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "logs:GetLogRecord",
                        "logs:GetLogGroupFields",
                        "logs:GetQueryResults",
                        "logs:DescribeLogGroups",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:logs:{self.region}:{self.account}:log-group:*"],
                ),
                # NOTE(review): ec2:Create*/Delete* on "*" is very broad —
                # presumably required for the VPC Lambda's ENI management;
                # confirm it cannot be narrowed.
                iam.PolicyStatement(
                    actions=["ec2:Create*", "ec2:Delete*", "ec2:Describe*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ),
                iam.PolicyStatement(
                    actions=["sts:AssumeRole"],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:iam::{self.account}:role/addf-*"],
                ),
            ]
        )
        ddb_os_lambda_role = iam.Role(
            self,
            f"{dep_mod}-lambda-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("lambda.amazonaws.com"),
            ),
            inline_policies={"DDBtoOSPolicyDocument": ddb_os_lambda_policy},
        )
        # Layer providing requests/requests-aws4auth for SigV4-signed calls.
        requests_awsauth_layer = PythonLayerVersion(
            self,
            id=f"{dep_mod}-requests-aws4auth",
            entry="layers/",
            layer_version_name=f"{dep_mod}-requests-aws4auth",
            compatible_runtimes=[lambda_.Runtime.PYTHON_3_10],
        )
        # The stream-processing Lambda itself (code under lambda/index.py).
        lambda_trigger = PythonFunction(
            self,
            "DDBtoOSTriggerLambda",
            entry="lambda",
            runtime=lambda_.Runtime.PYTHON_3_10,
            index="index.py",
            handler="handler",
            timeout=Duration.minutes(1),
            security_groups=[os_security_group],
            vpc=self.vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=self.private_subnets),
            environment={
                "REGION": self.region,
                "ACCOUNT": self.account,
                "DOMAIN_ENDPOINT": opensearch_domain_endpoint,
            },
            role=ddb_os_lambda_role,
            layers=[requests_awsauth_layer],
        )
        # Map the DynamoDB stream onto the Lambda, replaying from the oldest
        # available record in batches of 10.
        lambda_trigger.add_event_source_mapping(
            f"{dep_mod}-RosbagMetadataMapping",
            event_source_arn=ddb_stream_arn,
            starting_position=lambda_.StartingPosition.TRIM_HORIZON,
            batch_size=10,
        )
        # Exposed for the module's CfnOutput metadata.
        self.lambda_name = lambda_trigger.function_name
        self.lambda_arn = lambda_trigger.function_arn
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to the resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/app.py | modules/integration/ddb-to-opensearch/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
from aws_cdk import App, CfnOutput
from stack import DDBtoOpensearch
# SeedFarmer-provided naming context for this deployment.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Required networking parameters — fail fast with a clear message.
vpc_id = os.getenv(_param("VPC_ID"))
private_subnet_ids_param = os.getenv(_param("PRIVATE_SUBNET_IDS"))
if not vpc_id:
    raise ValueError("missing input parameter vpc-id")
if not private_subnet_ids_param:
    raise ValueError("missing input parameter private-subnet-ids")
else:
    # The subnet ids arrive as a JSON-encoded list of strings.
    private_subnet_ids = json.loads(private_subnet_ids_param)
# Optional parameters default to "" — validated downstream if needed.
opensearch_sg_id = os.getenv(_param("OPENSEARCH_SG_ID"), "")
opensearch_domain_name = os.getenv(_param("OPENSEARCH_DOMAIN_NAME"), "")
opensearch_domain_endpoint = os.getenv(_param("OPENSEARCH_DOMAIN_ENDPOINT"), "")
ddb_stream_arn = os.getenv(_param("ROSBAG_STREAM_ARN"), "")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Embeds AWS Solutions metadata when the SOLUTION_* parameters are set;
    otherwise falls back to a plain module description.
    """
    solution_id = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    solution_name = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    solution_version = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    if solution_id and solution_name and solution_version:
        return f"({solution_id}) {solution_name}. Version {solution_version}"
    if solution_id and solution_name:
        return f"({solution_id}) {solution_name}"
    return "DDB to OpenSearch Module"
app = App()

# Single-stack app; the id encodes project/deployment/module for uniqueness.
stack = DDBtoOpensearch(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    vpc_id=vpc_id,
    private_subnet_ids=private_subnet_ids,
    opensearch_sg_id=opensearch_sg_id,
    opensearch_domain_endpoint=opensearch_domain_endpoint,
    opensearch_domain_name=opensearch_domain_name,
    ddb_stream_arn=ddb_stream_arn,
    stack_description=generate_description(),
)

# SeedFarmer reads this JSON output to publish the module's metadata.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "LambdaName": stack.lambda_name,
            "LambdaArn": stack.lambda_arn,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/tests/test_app.py | modules/integration/ddb-to-opensearch/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Populate the environment variables app.py reads and force a fresh
    `import app` in every test (values persist after the test runs)."""
    os.environ.update(
        {
            "SEEDFARMER_PROJECT_NAME": "test-project",
            "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
            "SEEDFARMER_MODULE_NAME": "test-module",
            "CDK_DEFAULT_ACCOUNT": "111111111111",
            "CDK_DEFAULT_REGION": "us-east-1",
            "SEEDFARMER_PARAMETER_VPC_ID": "vpc-12345",
            "SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS": (
                '["subnet-00ffc51481090f2d4", "subnet-061322cd815e741e9", "subnet-089eccb47c3d29bf8"]'
            ),
            "SEEDFARMER_PARAMETER_OPENSEARCH_SG_ID": "sg-084c0dd9dc65c6937",
            "SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_NAME": "mydomain",
            "SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_ENDPOINT": (
                "vpc-aws-solutions--367e660c-k57drotm5ampt5nt7ftfnse4pi.us-west-2.es.amazonaws.com"
            ),
            "SEEDFARMER_PARAMETER_ROSBAG_STREAM_ARN": (
                "arn:aws:dynamodb:us-west-2:123456789012:table//stream/2023-08-15T03:16:51.909"
            ),
        }
    )
    # Unload the app import so that subsequent tests don't reuse it.
    sys.modules.pop("app", None)
def test_app(stack_defaults, mocker):
    """app.py imports cleanly when the stack construction is mocked out.

    (Removed the stale commented-out mocker.patch lines — dead code.)
    """
    mocker.patch("stack.DDBtoOpensearch", return_value=None)
    try:
        import app  # noqa: F401
    except AttributeError:
        # app.py reads attributes off the (mocked-away) stack object.
        pass
def test_missing_vpc_id(stack_defaults):
    """app.py must refuse to load without a VPC id."""
    del os.environ["SEEDFARMER_PARAMETER_VPC_ID"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_missing_subnet_id(stack_defaults):
    """app.py must refuse to load without private subnet ids."""
    del os.environ["SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    """Full SOLUTION_* metadata is rendered into the stack description."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app

    assert app.generate_description() == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """Without a version, the description omits the version suffix."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # Fix: `del` raised KeyError when the variable was not already set,
    # making this test depend on test execution order; pop() is order-safe.
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/tests/test_stack.py | modules/integration/ddb-to-opensearch/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Pin CDK account/region and force a fresh `import stack` per test."""
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults, mocker):
    """The stack instantiates once the Python-Lambda constructs are mocked."""
    import stack

    cdk_app = cdk.App()
    project, deployment, module = "test-project", "test-deployment", "test-module"

    # The Lambda/layer constructs would bundle real assets; mock them away.
    mocker.patch("stack.PythonLayerVersion", return_value=None)
    mocker.patch("stack.PythonFunction", return_value=None)
    try:
        _ = stack.DDBtoOpensearch(
            scope=cdk_app,
            id=f"{project}-{deployment}-{module}",
            project=project,
            deployment=deployment,
            module=module,
            vpc_id="vpc-12345",
            private_subnet_ids='["subnet-00ffc51481090f2d4", "subnet-061322cd815e741e9", "subnet-089eccb47c3d29bf8"]',
            opensearch_sg_id="sg-084c0dd9dc65c6937",
            opensearch_domain_endpoint="vpc-aws-solutions--367e660c-something.us-west-2.es.amazonaws.com",
            opensearch_domain_name="mydomain",
            ddb_stream_arn=(
                "arn:aws:dynamodb:us-west-2:123456789012:table/aws-solutions-metadata-storage-"
                "Rosbag-Scene-Metadata/stream/2023-08-15T03:16:51.909"
            ),
            stack_description="Testing",
            env=cdk.Environment(
                account=os.environ["CDK_DEFAULT_ACCOUNT"],
                region=os.environ["CDK_DEFAULT_REGION"],
            ),
        )
    except AttributeError:
        # Mocked constructs return None, so attribute access may fail late.
        pass
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/tests/__init__.py | modules/integration/ddb-to-opensearch/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/ddb-to-opensearch/lambda/index.py | modules/integration/ddb-to-opensearch/lambda/index.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from datetime import date
from typing import Any, Dict
import boto3
import requests
from requests_aws4auth import AWS4Auth
# --- Module-level setup: runs once per Lambda container, reused across
# --- invocations.
region = os.getenv("REGION", "")
service = "es"  # SigV4 service name for the (Elasticsearch-compatible) domain
# Sign requests with the Lambda execution role's credentials.
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(
    credentials.access_key,
    credentials.secret_key,
    region,
    service,
    session_token=credentials.token,
)
host = os.getenv("DOMAIN_ENDPOINT", "")  # OpenSearch endpoint, no scheme
index = "rosbag-metadata-scene-search"
# NOTE(review): `type` shadows the builtin; kept as-is for compatibility.
type = "_doc"
headers = {"Content-Type": "application/json"}
def get_url() -> str:
    """Return the base URL for today's dated OpenSearch index."""
    today = date.today()
    return f"https://{host}/{index}-{today}/{type}/"
def process_doc(records: Dict[Any, Any], doc: Dict[Any, Any]) -> None:
    """Flatten a DynamoDB image into *doc* in place.

    Each attribute value is a typed dict such as ``{"S": "value"}``; the
    inner value is copied onto *doc* under the attribute name.  If an
    attribute ever carried several type entries, the last one iterated
    would win — matching the original behaviour.
    """
    for attr_name, typed_value in records.items():
        for _dynamo_type, plain_value in typed_value.items():
            doc[attr_name] = plain_value
def handler(event, _context) -> str:
    """Lambda entry point for the DynamoDB stream trigger.

    For every non-REMOVE stream record, flattens the NewImage into a plain
    dict and PUTs it into today's dated OpenSearch index, keyed by the
    scene_id.  Returns a summary string with the processed-record count.
    """
    count = 0
    for record in event["Records"]:
        # The scene_id partition key doubles as the OpenSearch document id.
        id_p = record["dynamodb"]["Keys"]["scene_id"]["S"]
        if record["eventName"] != "REMOVE":
            doc = {}
            doc["scene_id"] = id_p
            doc["bag_file"] = record["dynamodb"]["Keys"]["bag_file"]["S"]
            document = record["dynamodb"]["NewImage"]
            process_doc(records=document, doc=doc)
            try:
                requests.put(get_url() + id_p, auth=awsauth, json=doc, headers=headers, timeout=30)
            except requests.exceptions.InvalidURL:
                print("Error invoking endpoint - InvalidURL")
                # NOTE(review): re-raises the exception *class*, discarding
                # the original message/context.
                raise requests.exceptions.InvalidURL
            except KeyError:
                # Best-effort: malformed payloads are logged and skipped.
                print("Could not process the payload")
            # Counts every non-REMOVE record, whether or not the PUT succeeded.
            count += 1
    return str(count) + " records processed."
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/opensearch-tunnel/stack.py | modules/integration/opensearch-tunnel/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from typing import Any, cast
import aws_cdk
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk.aws_s3_assets import Asset
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class TunnelStack(Stack):
    """CDK stack providing SSM port-forwarding access to an OpenSearch domain.

    Launches a small EC2 instance in a private subnet of the domain's VPC,
    runs the supplied install script against the domain endpoint/port
    (presumably setting up a local proxy — see the script itself), and
    exposes the instance id, a localhost dashboards URL, and the
    `aws ssm start-session` command needed to open the tunnel.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: str,
        module: str,
        project_name: str,
        vpc_id: str,
        opensearch_sg_id: str,
        opensearch_domain_endpoint: str,
        install_script: str,
        port: int,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Create the tunnel instance and compute the connection metadata.

        The keyword arguments mirror the SeedFarmer module parameters
        (see the accompanying app.py).
        """
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project_name}-{deployment}")
        dep_mod = f"{project_name}-{deployment}-{module}"
        # CDK Env Vars
        account: str = aws_cdk.Aws.ACCOUNT_ID
        region: str = aws_cdk.Aws.REGION
        self.vpc_id = vpc_id
        # Context lookup of the existing VPC (needs env account/region set).
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=vpc_id,
        )
        os_security_group = ec2.SecurityGroup.from_security_group_id(self, f"{dep_mod}-os-sg", opensearch_sg_id)
        # The tunnel instance shares this SG; open it to the whole VPC CIDR.
        os_security_group.connections.allow_from(
            ec2.Peer.ipv4(cidr_ip=self.vpc.vpc_cidr_block),
            ec2.Port.all_traffic(),
            "allow all traffic from VPC CIDR",
        )
        # AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux2023(
            edition=ec2.AmazonLinuxEdition.STANDARD,
        )
        os_tunnel_document = iam.PolicyDocument(
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream",
                        "logs:CreateLogGroup",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "logs:GetLogRecord",
                        "logs:GetLogGroupFields",
                        "logs:GetQueryResults",
                        "logs:DescribeLogGroups",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:logs:{region}:{account}:log-group:*"],
                ),
                iam.PolicyStatement(
                    actions=["sts:AssumeRole"],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:iam::{account}:role/{project_name}-*"],
                ),
            ]
        )
        os_tunnel_role = iam.Role(
            self,
            f"{dep_mod}-os_tunnel_role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
            ),
            inline_policies={"CDKostunnelPolicyDocument": os_tunnel_document},
        )
        # SSM agent access — enables the Session Manager port forwarding.
        os_tunnel_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore")
        )
        instance = ec2.Instance(
            self,
            f"{dep_mod}-OSTunnel",
            instance_type=ec2.InstanceType("t3.micro"),
            require_imdsv2=True,
            machine_image=amzn_linux,
            vpc=self.vpc,
            security_group=os_security_group,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS),
            role=os_tunnel_role,
            block_devices=[
                ec2.BlockDevice(
                    device_name="/dev/xvda",
                    volume=ec2.BlockDeviceVolume.ebs(10, encrypted=True),
                )
            ],
        )
        # Stage the install script in S3 and run it at boot with the
        # domain endpoint and port as positional arguments.
        asset = Asset(self, f"{dep_mod}-Asset", path=install_script)
        local_path = instance.user_data.add_s3_download_command(bucket=asset.bucket, bucket_key=asset.s3_object_key)
        args = opensearch_domain_endpoint + " " + str(port)
        instance.user_data.add_execute_file_command(file_path=local_path, arguments=args)
        asset.grant_read(instance.role)
        # Connection metadata surfaced through the module outputs.
        self.instance_id = instance.instance_id
        url = f"http://localhost:{port}/_dashboards/"
        self.dashboard_url = url
        json_params = {"portNumber": [str(port)], "localPortNumber": [str(port)]}
        # Ready-to-run command that opens the SSM port-forwarding session.
        self.command = (
            f"aws ssm start-session --target {self.instance_id} "
            "--document-name AWS-StartPortForwardingSession "
            f"--parameters '{json.dumps(json_params)}'"
        )
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-EC23",
                        "reason": "Access is uin a private subnet",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-EC28",
                        "reason": "Detailed Monitoring not enabled as this is a simple tunnel",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-EC29",
                        "reason": "ASG not enabled as this is a simple tunnel",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/opensearch-tunnel/app.py | modules/integration/opensearch-tunnel/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import TunnelStack
# Project specific
# Injected by seed-farmer at deploy time; default to "" so f-strings below never fail.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Required wiring exported by other ADDF modules; validated immediately below.
vpc_id = os.getenv(_param("VPC_ID"))
opensearch_sg_id = os.getenv(_param("OPENSEARCH_SG_ID"))
opensearch_domain_endpoint = os.getenv(_param("OPENSEARCH_DOMAIN_ENDPOINT"))
if not vpc_id:
    raise ValueError("missing input parameter vpc-id")
if not opensearch_sg_id:
    raise ValueError("missing input parameter opensearch_sg_id")
if not opensearch_domain_endpoint:
    raise ValueError("missing input parameter opensearch_domain_endpoint")
# Local port used for the SSM port-forwarding session (default 3000).
port = int(os.getenv(_param("PORT"), "3000"))
project_dir = os.path.dirname(os.path.abspath(__file__))
# Bootstrap script shipped next to this app; run on the tunnel instance at launch.
install_script = os.path.join(project_dir, "install_nginx.sh")
def generate_description() -> str:
    """Return the CloudFormation stack description.

    Embeds the AWS Solution id/name (and version, when present) if the
    corresponding SEEDFARMER parameters are set; otherwise falls back to the
    plain "Opensearch Tunnel" description.
    """
    solution_id = os.getenv(_param("SOLUTION_ID"), None)
    solution_name = os.getenv(_param("SOLUTION_NAME"), None)
    solution_version = os.getenv(_param("SOLUTION_VERSION"), None)
    if solution_id and solution_name and solution_version:
        return f"({solution_id}) {solution_name}. Version {solution_version}"
    if solution_id and solution_name:
        return f"({solution_id}) {solution_name}"
    return "Opensearch Tunnel"
app = App()
stack = TunnelStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project_name=project_name,
    deployment=deployment_name,
    module=module_name,
    vpc_id=vpc_id,
    opensearch_sg_id=opensearch_sg_id,
    opensearch_domain_endpoint=opensearch_domain_endpoint,
    install_script=install_script,
    port=port,
    stack_description=generate_description(),
)
# Publish module metadata so downstream modules / operators can discover the
# tunnel instance and the ready-made SSM port-forwarding command.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "OpenSearchTunnelInstanceId": stack.instance_id,
            "OpenSearchTunnelUrl": stack.dashboard_url,
            "OpenSearchTunnelPort": port,
            "SampleSSMCommand": stack.command,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/opensearch-tunnel/tests/test_app.py | modules/integration/opensearch-tunnel/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    # Provide the minimum SeedFarmer/CDK environment that app.py reads at import time.
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_VPC_ID"] = "vpc-12345"
    os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_ENDPOINT"] = (
        "vpc-addf-aws-solutions--367e660c-something.us-west-2.es.amazonaws.com"
    )
    os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_SG_ID"] = "sg-084c0dd9dc65c6937"
    # Unload the app import so that subsequent tests don't reuse
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    # Happy path: importing app synthesizes without raising.
    import app  # noqa: F401
def test_missing_vpc_id(stack_defaults):
    # app.py validates required params at import time, so import must raise.
    del os.environ["SEEDFARMER_PARAMETER_VPC_ID"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_missing_domain_endpoint(stack_defaults):
    del os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_ENDPOINT"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_missing_domain_sg(stack_defaults):
    del os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_SG_ID"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    # All three solution parameters set -> full "(id) name. Version v" form.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app
    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    # Version absent -> shorter "(id) name" form.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    del os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"]
    import app
    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/opensearch-tunnel/tests/test_stack.py | modules/integration/opensearch-tunnel/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    # Environment needed for CDK env resolution; module cache cleared per test.
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_VPC_ID"] = "vpc-12345"
    os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_DOMAIN_ENDPOINT"] = (
        "vpc-addf-aws-solutions--367e660c-something.us-west-2.es.amazonaws.com"
    )
    os.environ["SEEDFARMER_PARAMETER_OPENSEARCH_SG_ID"] = "sg-084c0dd9dc65c6937"
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_synthesize_stack(stack_defaults):
    # Synthesize the TunnelStack directly and assert on the resulting template.
    import stack
    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    project_dir = os.path.dirname(os.path.abspath(__file__))
    install_script = os.path.join(project_dir, "..", "install_nginx.sh")
    tunnel = stack.TunnelStack(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment=dep_name,
        module=mod_name,
        vpc_id="vpc-12345",
        opensearch_sg_id="sg-084c0dd9dc65c6937",
        opensearch_domain_endpoint="vpc-addf-aws-solutions--367e660c-something.us-west-2.es.amazonaws.com",
        install_script=install_script,
        port=3333,
        stack_description="Testing",
    env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(tunnel)
    # The tunnel is a single instance: one role/profile, one instance, one ingress rule.
    template.resource_count_is("AWS::IAM::Role", 1)
    template.resource_count_is("AWS::IAM::InstanceProfile", 1)
    template.resource_count_is("AWS::EC2::Instance", 1)
    template.resource_count_is("AWS::EC2::SecurityGroupIngress", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/opensearch-tunnel/tests/__init__.py | modules/integration/opensearch-tunnel/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/efs-on-eks/stack_efs_eks.py | modules/integration/efs-on-eks/stack_efs_eks.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_eks as eks
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
project_dir = os.path.dirname(os.path.abspath(__file__))
class EFSFileStorageOnEKS(Stack):
    """Connect an existing EFS file system to an existing EKS cluster.

    Opens NFS (2049) from the EKS cluster security group to the EFS security
    group and installs a dynamic-provisioning ``StorageClass`` backed by the
    EFS CSI driver into the cluster.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        deployment_name: str,
        module_name: str,
        efs_file_system_id: str,
        efs_security_group_id: str,
        eks_cluster_name: str,
        eks_admin_role_arn: str,
        eks_oidc_arn: str,
        eks_cluster_security_group_id: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack connects an existing EFS to an existing EKS",
            **kwargs,
        )
        self.deployment_name = deployment_name
        self.module_name = module_name
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"addf-{self.deployment_name}")
        dep_mod = f"addf-{self.deployment_name}-{self.module_name}"
        # Truncated — presumably to keep derived construct ids/names within
        # AWS length limits; confirm before relying on the full prefix.
        dep_mod = dep_mod[:30]
        # Import EKS Cluster
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_admin_role_arn,
            open_id_connect_provider=provider,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )
        # NOTE(review): the construct ids below appear swapped — "EKSSecurityGroup"
        # wraps the EFS SG and vice versa. Renaming would change CloudFormation
        # logical ids (resource replacement), so they are left as-is; confirm intent.
        efs_security_group = ec2.SecurityGroup.from_security_group_id(self, "EKSSecurityGroup", efs_security_group_id)
        eks_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "EFSSecurityGroup", eks_cluster_security_group_id
        )
        # Allow NFS traffic from the cluster nodes into the file system.
        efs_security_group.connections.allow_from(
            eks_security_group,
            ec2.Port.tcp(2049),
            "allowtraffic from EKS nodes over port 2049",
        )
        # Set up the StorageClass pointing at the new CSI Driver
        # https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml
        self.storage_class_name = f"{module_name}-efs"
        eks_cluster.add_manifest(
            "EFSCSIStorageClass",
            {
                "kind": "StorageClass",
                "apiVersion": "storage.k8s.io/v1",
                "metadata": {"name": self.storage_class_name},
                "provisioner": "efs.csi.aws.com",
                "parameters": {
                    "provisioningMode": "efs-ap",
                    "fileSystemId": efs_file_system_id,
                    "directoryPerms": "700",
                    "gidRangeStart": "1000",
                    "gidRangeEnd": "2000",
                    "basePath": "/dynamic_provisioning",
                },
            },
        )
        # Run cdk-nag checks, with project-wide accepted suppressions.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/efs-on-eks/app.py | modules/integration/efs-on-eks/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
import aws_cdk
from aws_cdk import App, CfnOutput
from stack_efs_eks import EFSFileStorageOnEKS
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Wiring parameters exported by the EKS and EFS modules; all six are required.
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"))
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"))
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"))
eks_cluster_sg_id = os.getenv(_param("EKS_CLUSTER_SECURITY_GROUP_ID"))
efs_file_system_id = os.getenv(_param("EFS_FILE_SYSTEM_ID"))
efs_security_group_id = os.getenv(_param("EFS_SECURITY_GROUP_ID"))

# Fail fast with a clear message instead of casting None and failing deep
# inside CDK synthesis (matches the validation style of sibling ADDF modules).
for _name, _value in {
    "eks-cluster-name": eks_cluster_name,
    "eks-cluster-admin-role-arn": eks_admin_role_arn,
    "eks-oidc-arn": eks_oidc_arn,
    "eks-cluster-security-group-id": eks_cluster_sg_id,
    "efs-file-system-id": efs_file_system_id,
    "efs-security-group-id": efs_security_group_id,
}.items():
    if not _value:
        raise ValueError(f"missing input parameter {_name}")

app = App()
efs_stack = EFSFileStorageOnEKS(
    scope=app,
    id=f"addf-{deployment_name}-{module_name}",
    deployment_name=deployment_name,
    module_name=module_name,
    efs_file_system_id=cast(str, efs_file_system_id),
    efs_security_group_id=cast(str, efs_security_group_id),
    eks_cluster_name=cast(str, eks_cluster_name),
    eks_admin_role_arn=cast(str, eks_admin_role_arn),
    eks_oidc_arn=cast(str, eks_oidc_arn),
    eks_cluster_security_group_id=cast(str, eks_cluster_sg_id),
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# Publish the StorageClass name so workloads in other modules can reference it.
CfnOutput(
    scope=efs_stack,
    id="metadata",
    value=efs_stack.to_json_string(
        {
            "EFSStorageClassName": efs_stack.storage_class_name,
            "EKSClusterName": eks_cluster_name,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/efs-on-eks/tests/test_stack.py | modules/integration/efs-on-eks/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    # NOTE(review): the ADDF_* variables look stale — the apps read SEEDFARMER_*
    # names. Only the CDK_DEFAULT_* values are actually consumed by this test.
    os.environ["ADDF_PROJECT_NAME"] = "test-project"
    os.environ["ADDF_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["ADDF_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_synthesize_stack(stack_defaults):
    # Smoke test: constructing the stack must not raise during synthesis setup.
    import stack_efs_eks
    app = cdk.App()
    dep_name = "test-deployment"
    mod_name = "test-module"
    stack_efs_eks.EFSFileStorageOnEKS(
        scope=app,
        id=f"addf-{dep_name}-{mod_name}",
        deployment_name=dep_name,
        module_name=mod_name,
        efs_file_system_id="foobar",
        efs_security_group_id="sg-0123456",
        eks_cluster_name="myekscluster",
        eks_admin_role_arn="arn:aws:iam::123456789012:role/eks-admin-role",
        eks_oidc_arn="arn:aws:iam::123456789012:oidc-provider/server.example.com",
        eks_cluster_security_group_id="sg-0123456",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/efs-on-eks/tests/__init__.py | modules/integration/efs-on-eks/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/emr-to-opensearch/stack.py | modules/integration/emr-to-opensearch/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, List, cast
import cdk_nag
from aws_cdk import Duration, Stack, Tags
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_s3_notifications as s3n
from aws_cdk.aws_lambda_python_alpha import PythonFunction, PythonLayerVersion
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class EMRtoOpensearch(Stack):
    """Index EMR step/controller logs from S3 into an OpenSearch domain.

    Deploys a VPC-attached Lambda that is triggered whenever EMR writes a
    ``stderr.gz`` or ``controller.gz`` object under the configured prefix of
    the shared logs bucket, and bulk-indexes the log lines into OpenSearch.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project: str,
        deployment: str,
        module: str,
        vpc_id: str,
        private_subnet_ids: List[str],
        opensearch_sg_id: str,
        opensearch_domain_endpoint: str,
        opensearch_domain_name: str,
        logs_bucket_name: str,
        emr_logs_prefix: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack integrates EMR Cluster with Opensearch cluster for ADDF",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
        dep_mod = f"{project}-{deployment}-{module}"
        self.vpc_id = vpc_id
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=vpc_id,
        )
        # The Lambda runs in these private subnets so it can reach the
        # VPC-only OpenSearch endpoint.
        self.private_subnets = []
        for idx, subnet_id in enumerate(private_subnet_ids):
            self.private_subnets.append(ec2.Subnet.from_subnet_id(scope=self, id=f"subnet{idx}", subnet_id=subnet_id))
        os_security_group = ec2.SecurityGroup.from_security_group_id(self, f"{dep_mod}-os-sg", opensearch_sg_id)
        # Import Shared Logs bucket
        logs_bucket = s3.Bucket.from_bucket_name(self, f"{dep_mod}-logs-bucket", logs_bucket_name)
        # Least-privilege policy for the indexing Lambda: write to the domain,
        # read EMR logs from the shared bucket, write its own CloudWatch logs,
        # and manage the ENIs required for VPC attachment.
        emr_os_lambda_policy = iam.PolicyDocument(
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "es:ESHttpPost",
                        "es:ESHttpPut",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:{self.partition}:es:{self.region}:{self.account}:domain/{opensearch_domain_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:ListBucket",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:s3:::{logs_bucket_name}"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:GetObject",
                        "s3:GetObjectAcl",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:s3:::{logs_bucket_name}/{emr_logs_prefix}*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream",
                        "logs:CreateLogGroup",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "logs:GetLogRecord",
                        "logs:GetLogGroupFields",
                        "logs:GetQueryResults",
                        "logs:DescribeLogGroups",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{self.partition}:logs:{self.region}:{self.account}:log-group:*"],
                ),
                iam.PolicyStatement(
                    actions=["ec2:Create*", "ec2:Delete*", "ec2:Describe*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ),
            ]
        )
        emr_os_lambda_role = iam.Role(
            self,
            f"{dep_mod}-lambda-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("lambda.amazonaws.com"),
            ),
            inline_policies={"EMRtoOSPolicyDocument": emr_os_lambda_policy},
        )
        # Layer bundling requests-aws4auth (SigV4 signing for the ES client).
        requests_awsauth_layer = PythonLayerVersion(
            self,
            id=f"{dep_mod}-requests-aws4auth",
            entry="layers/",
            layer_version_name=f"{dep_mod}-requests-aws4auth",
            compatible_runtimes=[lambda_.Runtime.PYTHON_3_11],
        )
        lambda_trigger = PythonFunction(
            self,
            "EMRlogstoOSTriggerLambda",
            entry="lambda",
            runtime=lambda_.Runtime.PYTHON_3_11,
            index="index.py",
            handler="handler",
            timeout=Duration.minutes(1),
            security_groups=[os_security_group],
            vpc=self.vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=self.private_subnets),
            environment={
                "REGION": self.region,
                "ACCOUNT": self.account,
                "DOMAIN_ENDPOINT": opensearch_domain_endpoint,
            },
            role=emr_os_lambda_role,
            layers=[requests_awsauth_layer],
        )
        # Fire the Lambda for the two EMR log artifacts worth indexing:
        # step stderr and the step controller log.
        logs_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,  # Event
            s3n.LambdaDestination(lambda_trigger),  # Dest
            s3.NotificationKeyFilter(prefix=emr_logs_prefix, suffix="stderr.gz"),  # Prefix
        )
        logs_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,  # Event
            s3n.LambdaDestination(lambda_trigger),  # Dest
            s3.NotificationKeyFilter(prefix=emr_logs_prefix, suffix="controller.gz"),  # Prefix
        )
        # Exposed so app.py can emit them as module metadata.
        self.lambda_name = lambda_trigger.function_name
        self.lambda_arn = lambda_trigger.function_arn
        cdk_nag.NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                cdk_nag.NagPackSuppression(
                    id="AwsSolutions-IAM4",
                    reason="Managed Policies are for service account roles only",
                ),
                cdk_nag.NagPackSuppression(
                    id="AwsSolutions-IAM5",
                    reason="Resource access restriced to ADDF resources",
                ),
            ],
        )
        cdk_nag.NagSuppressions.add_resource_suppressions(
            lambda_trigger,
            suppressions=[
                cdk_nag.NagPackSuppression(
                    id="AwsSolutions-L1",
                    reason="We don't want this to start failing as soon as a new version is released",
                ),
            ],
            apply_to_children=True,
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/emr-to-opensearch/app.py | modules/integration/emr-to-opensearch/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
import cdk_nag
from stack import EMRtoOpensearch
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
vpc_id = os.getenv(_param("VPC_ID"))  # required
# BUG FIX: default to an empty JSON list — the previous default of "" made
# json.loads raise a JSONDecodeError before the friendly "missing input
# parameter" error below could be reported.
private_subnet_ids = json.loads(os.getenv(_param("PRIVATE_SUBNET_IDS"), "[]"))  # required
opensearch_sg_id = os.getenv(_param("OPENSEARCH_SG_ID"), "")
opensearch_domain_name = os.getenv(_param("OPENSEARCH_DOMAIN_NAME"), "")
opensearch_domain_endpoint = os.getenv(_param("OPENSEARCH_DOMAIN_ENDPOINT"), "")
logs_bucket_name = os.getenv(_param("LOGS_BUCKET_NAME"), "")
emr_logs_prefix = os.getenv(_param("EMR_LOGS_PREFIX"), "")
# ValueError matches the validation style of the sibling ADDF app modules
# (it is a subclass of Exception, so existing except clauses still match).
if not vpc_id:
    raise ValueError("missing input parameter vpc-id")
if not private_subnet_ids:
    raise ValueError("missing input parameter private-subnet-ids")
app = aws_cdk.App()
stack = EMRtoOpensearch(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    vpc_id=vpc_id,
    private_subnet_ids=private_subnet_ids,
    opensearch_sg_id=opensearch_sg_id,
    opensearch_domain_endpoint=opensearch_domain_endpoint,
    opensearch_domain_name=opensearch_domain_name,
    logs_bucket_name=logs_bucket_name,
    emr_logs_prefix=emr_logs_prefix,
)
# Publish the indexing Lambda's identity as module metadata.
aws_cdk.CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "LambdaName": stack.lambda_name,
            "LambdaArn": stack.lambda_arn,
        }
    ),
)
# Run cdk-nag across the whole app; violations are logged rather than fatal.
aws_cdk.Aspects.of(app).add(cdk_nag.AwsSolutionsChecks(log_ignores=True))
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/emr-to-opensearch/tests/test_stack.py | modules/integration/emr-to-opensearch/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/emr-to-opensearch/tests/__init__.py | modules/integration/emr-to-opensearch/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/integration/emr-to-opensearch/lambda/index.py | modules/integration/emr-to-opensearch/lambda/index.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import datetime
import gzip
import hashlib
import io
import logging
import os
import re
from urllib.parse import unquote_plus
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import helpers as es_helpers
from requests_aws4auth import AWS4Auth
# vars
# OpenSearch connection settings injected via the Lambda environment by the CDK stack.
region = os.getenv("REGION", "")
host = os.getenv("DOMAIN_ENDPOINT", "")
# Documents are written to daily indices named "<index>-<YYYY-MM-DD>".
index = "emrlogs"
# Number of documents sent per bulk request.
ES_BULK_BATCH_SIZE = 1000
# Client connections
s3 = boto3.client("s3")
# Logging
_logger: logging.Logger = logging.getLogger(__name__)
def get_file_timestamp(bucket_name, object_key):
    """Return the S3 object's ``LastModified`` datetime via a HEAD request."""
    head = s3.head_object(Bucket=bucket_name, Key=object_key)
    return head["LastModified"]
def download_logs(bucket_name, object_key):
    """Fetch a gzipped S3 object and return its UTF-8 text as a list of lines."""
    compressed = io.BytesIO()
    s3.download_fileobj(bucket_name, object_key, compressed)
    # Rewind before decompressing the in-memory buffer.
    compressed.seek(0)
    with gzip.GzipFile(fileobj=compressed) as unzipped:
        text = unzipped.read().decode("UTF-8")
    return text.splitlines()
def enrich_log(log_entry):
    """Attach EMR cluster/step IDs parsed from the entry's S3 log path.

    Step logs get both ``emr_cluster_id`` and ``emr_step_id``; other cluster
    logs get only ``emr_cluster_id``; unrelated paths are left untouched.
    """
    path = log_entry["log_file"]
    step_match = re.search(r"/(j-[\w]+)/steps/(s-[\w]+)/", path)
    if step_match is not None:
        log_entry["emr_cluster_id"], log_entry["emr_step_id"] = step_match.groups()
        return
    cluster_match = re.search(r"/(j-[\w]+)/", path)
    if cluster_match is not None:
        log_entry["emr_cluster_id"] = cluster_match.group(1)
def transform_log(raw_log, line_number, file_timestamp, key, bucket, region):
    """Wrap one raw log line in a document with file/location metadata."""
    log_entry = {
        "raw_log": raw_log,
        "log_file_line_number": line_number,
        "log_file": f"s3://{bucket}/{key}",
        "log_file_timestamp": file_timestamp.isoformat(),
        "aws_region": region,
    }
    # Adds emr_cluster_id / emr_step_id in place when the path matches.
    enrich_log(log_entry)
    return log_entry
def create_log_id(log_entry):
    """Deterministic document id: SHA-256 of ``<log_file>|<line_number>``."""
    raw_id = f"{log_entry['log_file']}|{log_entry['log_file_line_number']}"
    return hashlib.sha256(raw_id.encode("utf-8")).hexdigest()
def store_logs(logs, es_client):
    """Bulk-index the given log documents into today's dated EMR log index."""
    target_index = f"{index}-{datetime.datetime.utcnow().date().isoformat()}"
    actions = []
    for log in logs:
        actions.append(
            {
                "_index": target_index,
                "_type": "emr_log",
                "_id": create_log_id(log),
                "_source": log,
            }
        )
    response = es_helpers.bulk(es_client, actions)
    _logger.info("RESPONSE: %s", response)
def create_es_client():
    """Build an Elasticsearch client that SigV4-signs requests for the 'es' service."""
    credentials = boto3.Session().get_credentials()
    signed_auth = AWS4Auth(
        credentials.access_key,
        credentials.secret_key,
        region,
        "es",
        session_token=credentials.token,
    )
    return Elasticsearch(
        hosts=[{"host": host, "port": 443}],
        http_auth=signed_auth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
    )
def handler(event, _context):
    """S3 event handler: index each referenced EMR log file into OpenSearch.

    For every S3 record, downloads and decompresses the log object, converts
    each non-empty line into a document, and bulk-stores the documents in
    batches of ES_BULK_BATCH_SIZE.
    """
    es = create_es_client()
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        # Keys in S3 events are URL-encoded (spaces arrive as '+').
        key = unquote_plus(record["s3"]["object"]["key"])
        file_timestamp = get_file_timestamp(bucket, key)
        raw_logs = download_logs(bucket, key)
        _logger.info("Number of entries in the log: %s", len(raw_logs))
        batch = []
        batch_number = 1
        skipped_entries_count = 0
        for line_number, line in enumerate(raw_logs, start=1):
            if not line.strip():
                skipped_entries_count += 1
                continue
            batch.append(transform_log(line, line_number, file_timestamp, key, bucket, region))
            if len(batch) >= ES_BULK_BATCH_SIZE:
                _logger.info("Saving batch %s containing %s entries...", batch_number, len(batch))
                store_logs(batch, es)
                batch = []
                batch_number += 1
        # BUG FIX: flush the final partial batch unconditionally. The original
        # flushed only when the *last* line entered the non-empty branch, so a
        # file ending in a blank line silently dropped up to
        # ES_BULK_BATCH_SIZE - 1 log entries.
        if batch:
            _logger.info("Saving batch %s containing %s entries...", batch_number, len(batch))
            store_logs(batch, es)
        if skipped_entries_count > 0:
            _logger.debug("Skipped %s entries", skipped_entries_count)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/stack.py | modules/workbench/cloud9/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Optional
import aws_cdk.aws_cloud9 as cloud9
from aws_cdk import Stack
from constructs import Construct
class Cloud9Stack(Stack):
    """Provision an AWS Cloud9 EC2 development environment."""

    def __init__(
        self,
        scope: Construct,
        id: str,
        image_id: str,
        connection_type: Optional[str],
        instance_stop_time_minutes: Optional[int],
        instance_type: str,
        name: Optional[str],
        owner_arn: str,
        subnet_id: str,
        **kwargs: Any,
    ) -> None:
        """Create the Cloud9 environment.

        Parameters
        ----------
        scope : Construct
            The construct's parent
        id : str
            The unique identifier of the resource
        image_id : str
            The identifier for the Amazon Machine Image (AMI) that's used to create
            the EC2 instance. To choose an AMI for the instance, you must specify a
            valid AMI alias or a valid AWS Systems Manager path.
        connection_type : Optional[str]
            The connection type used for connecting to an Amazon EC2 environment.
            Valid values are CONNECT_SSM (default) and CONNECT_SSH
        instance_stop_time_minutes : Optional[int]
            The number of minutes until the running instance is shut down after the
            environment was last used
        instance_type : str
            Type of instance to launch
        name : Optional[str]
            The name of the Cloud9 instance
        owner_arn : str
            The Amazon Resource Name (ARN) of the environment owner. This ARN can be the
            ARN of any AWS Identity and Access Management principal. If this value is
            not specified, the ARN defaults to this environment's creator.
        subnet_id : str
            The ID of the subnet in Amazon Virtual Private Cloud (Amazon VPC) that AWS
            Cloud9 will use to communicate with the Amazon Elastic Compute Cloud
            (Amazon EC2) instance
        """
        super().__init__(
            scope,
            id,
            # BUG FIX: the description previously read "This stack deploys
            # Networking resources for ADDF" — a copy/paste from another module.
            description="This stack deploys a Cloud9 environment for ADDF",
            **kwargs,
        )
        # Exposed so app.py can emit the environment's name/ARN as module metadata.
        self.cloud9_instance = cloud9.CfnEnvironmentEC2(
            self,
            "Cloud9Env",
            instance_type=instance_type,
            owner_arn=owner_arn,
            subnet_id=subnet_id,
            # the properties below are optional
            image_id=image_id,
            automatic_stop_time_minutes=instance_stop_time_minutes,
            connection_type=connection_type,
            name=name,
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/app.py | modules/workbench/cloud9/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import aws_cdk
import boto3
from aws_cdk import App, CfnOutput
from stack import Cloud9Stack
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
def is_ami_valid(image_id: str) -> str:
    """Validate *image_id* against Cloud9-supported AMIs and return it.

    A raw AMI id (``ami-...``) is checked against the SSM
    ``/aws/service/cloud9`` parameters and translated to the matching
    ``resolve:ssm:`` alias; a ``ValueError`` is raised when no match exists.
    Non-AMI aliases (e.g. ``ubuntu-18.04-x86_64``) are returned unchanged.

    BUG FIX: the original computed the ``resolve:ssm:`` alias but discarded it
    (the rebinding was a dead local); returning the value lets callers use the
    resolved alias while staying backward compatible — previous callers
    ignored the ``None`` return.
    """
    if image_id.startswith("ami"):
        ssmc = boto3.client("ssm")
        cloud9_params = ssmc.get_parameters_by_path(Path="/aws/service/cloud9", Recursive=True)
        for param in cloud9_params.get("Parameters", []):
            if param["Value"] == image_id:
                image_id = f"resolve:ssm:{param['Name']}"
                break
        else:
            # for/else: reached only when no parameter matched the AMI id.
            raise ValueError(
                (
                    f"The AMI provided `{image_id}` is not a valid AMI supported by Cloud9. "
                    "For a list of valid images, check the README or run the following command: "
                    "`aws ssm get-parameters-by-path --path '/aws/service/cloud9' --recursive` "
                )
            )
    return image_id
project_name = os.getenv("SEEDFARMER_PROJECT_NAME")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME")
module_name = os.getenv("SEEDFARMER_MODULE_NAME")
# Optional parameters with sensible defaults; required ones validated below.
connection_type = os.getenv(_param("CONNECTION_TYPE"), "CONNECT_SSM")
image_id = os.getenv(_param("IMAGE_ID"), "ubuntu-18.04-x86_64")
instance_stop_time_minutes = int(os.getenv(_param("INSTANCE_STOP_TIME_MINUTES"), 60))
instance_type = os.getenv(_param("INSTANCE_TYPE"), None)
name = os.getenv(_param("INSTANCE_NAME"), "cloud9env")
owner_arn = os.getenv(_param("OWNER_ARN"), None)
# NOTE(review): this is an int when defaulted but a str when supplied via the
# environment; update_root_vol.py int()s it again, so both forms work.
storage_size = os.getenv(_param("STORAGE_SIZE"), 20)
subnet_id = os.getenv(_param("SUBNET_ID"), None)
is_ami_valid(image_id=image_id)
if instance_type is None:
    raise ValueError("Parameter `instance_type` not found.")
if owner_arn is None:
    raise ValueError("Parameter `owner_arn` not found. You will not be able to access your env")
if subnet_id is None:
    raise ValueError("Parameter `subnet_id` not found")
app = App()
stack = Cloud9Stack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    connection_type=connection_type,
    image_id=image_id,
    instance_stop_time_minutes=instance_stop_time_minutes,
    instance_type=instance_type,
    name=name,
    owner_arn=owner_arn,
    subnet_id=subnet_id,
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# InstanceStorageSize is consumed post-deploy by scripts/update_root_vol.py
# to grow the instance's root EBS volume.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "Cloud9EnvName": stack.cloud9_instance.attr_name,
            "Cloud9EnvArn": stack.cloud9_instance.attr_arn,
            "InstanceStorageSize": storage_size,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/scripts/update_root_vol.py | modules/workbench/cloud9/scripts/update_root_vol.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Post-deployment helper: tag the Cloud9 EC2 instance/volume and grow its root EBS volume."""
import json
import logging
import os

import boto3
import botocore

LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d] %(message)s"
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
_logger: logging.Logger = logging.getLogger(__name__)

ec2_client = boto3.client("ec2")

# Metadata emitted by the CDK app's "metadata" CfnOutput is handed to this
# script via SEEDFARMER_MODULE_METADATA.
SEEDFARMER_METADATA = json.loads(os.getenv("SEEDFARMER_MODULE_METADATA"))  # type: ignore

cloud9_arn = SEEDFARMER_METADATA.get("Cloud9EnvArn")
cloud9_env_id = cloud9_arn.split(":")[-1]  # environment id is the last ARN segment
volume_size = int(SEEDFARMER_METADATA.get("InstanceStorageSize"))

# Cloud9 tags its backing EC2 instance with aws:cloud9:environment=<env-id>.
res = ec2_client.describe_instances(Filters=[{"Name": "tag:aws:cloud9:environment", "Values": [cloud9_env_id]}])
full_cloud9_instance_name = [
    tag["Value"] for tag in res["Reservations"][0]["Instances"][0]["Tags"] if tag["Key"] == "Name"
][0]
instance_id = res["Reservations"][0]["Instances"][0]["InstanceId"]
volume_id = res["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0]["Ebs"]["VolumeId"]

# Propagate SeedFarmer bookkeeping tags onto the instance and its root volume.
# Any failure here is fatal, so let exceptions propagate unchanged (the old
# try/except only re-raised and was a no-op).
ec2_client.create_tags(
    Resources=[instance_id, volume_id],
    Tags=[
        {"Key": "SEEDFARMER_PROJECT_NAME", "Value": os.getenv("SEEDFARMER_PROJECT_NAME")},
        {"Key": "SEEDFARMER_DEPLOYMENT_NAME", "Value": os.getenv("SEEDFARMER_DEPLOYMENT_NAME")},
        {"Key": "SEEDFARMER_MODULE_NAME", "Value": os.getenv("SEEDFARMER_MODULE_NAME")},
        {"Key": "Name", "Value": full_cloud9_instance_name},
    ],
)

try:
    ec2_client.modify_volume(
        VolumeId=volume_id,
        Size=volume_size,
    )
except botocore.exceptions.ClientError as err:
    if err.response["Error"]["Code"] == "VolumeModificationRateExceeded":
        # EBS enforces a cooldown between volume modifications; treat a recent
        # resize as benign and move on.
        _logger.info(err)
    else:
        # Re-raise without masking the original ClientError type
        # (previously wrapped in a bare Exception).
        raise
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/scripts/pre_deploy.py | modules/workbench/cloud9/scripts/pre_deploy.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
import time
from typing import Any, Dict, Tuple
import boto3
LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d] %(message)s"
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
_logger: logging.Logger = logging.getLogger(__name__)

iam_client = boto3.client("iam")

# Well-known IAM names that Cloud9 expects for SSM-based connectivity.
role_name = "AWSCloud9SSMAccessRole"
instance_profile_name = "AWSCloud9SSMInstanceProfile"
# Partition for ARNs (e.g. "aws", "aws-cn"); defaults to the commercial partition.
partition = os.getenv("AWS_PARTITION", "aws")
def attach_role_to_instance_profile(instance_profile_name: str, role_name: str) -> None:
    """Attach IAM role *role_name* to instance profile *instance_profile_name*.

    Any IAM client error propagates to the caller.  (The previous
    try/except only re-raised the exception and has been removed.)
    """
    iam_client.add_role_to_instance_profile(InstanceProfileName=instance_profile_name, RoleName=role_name)
def check_access_role_exist(role_name: str) -> bool:
    """Return True when the IAM role *role_name* already exists, else False."""
    try:
        iam_client.get_role(RoleName=role_name)
    except iam_client.exceptions.NoSuchEntityException:
        _logger.warning(f"The role {role_name} does not exist")
        return False
    _logger.info(f"Using existing role {role_name}.")
    return True
def check_instance_profile_exist(
    instance_profile_name: str,
) -> Tuple[bool, Dict[str, Any]]:
    """Return ``(exists, data)`` for the given IAM instance profile.

    ``data`` is the raw GetInstanceProfile response when it exists, else {}.
    """
    try:
        profile_data = iam_client.get_instance_profile(InstanceProfileName=instance_profile_name)
    except iam_client.exceptions.NoSuchEntityException:
        _logger.info(f"Instance profile {instance_profile_name} does not exist")
        return (False, {})
    _logger.info(f"Using existing instance profile {instance_profile_name}.")
    return (True, profile_data)
def create_access_role(role_name: str) -> None:
    """Create the Cloud9 SSM access role and attach the AWS managed policy.

    The role trusts cloud9.amazonaws.com and ec2.amazonaws.com and is tagged
    with the SeedFarmer project/deployment/module names.  IAM errors
    propagate unchanged (the previous try/except blocks only re-raised and
    have been removed).
    """
    assume_role_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"Service": ["cloud9.amazonaws.com", "ec2.amazonaws.com"]},
                "Action": ["sts:AssumeRole"],
            }
        ],
    }
    _logger.info(f"Creating role {role_name}")
    iam_client.create_role(
        AssumeRolePolicyDocument=json.dumps(assume_role_policy),
        Description="Service linked role for AWS Cloud9",
        Path="/service-role/",
        RoleName=role_name,
        Tags=[
            {"Key": "SEEDFARMER_PROJECT_NAME", "Value": os.getenv("SEEDFARMER_PROJECT_NAME")},
            {
                "Key": "SEEDFARMER_DEPLOYMENT_NAME",
                "Value": os.getenv("SEEDFARMER_DEPLOYMENT_NAME"),
            },
            {"Key": "SEEDFARMER_MODULE_NAME", "Value": os.getenv("SEEDFARMER_MODULE_NAME")},
        ],
    )
    # Give IAM a moment to propagate the new role before attaching a policy.
    time.sleep(5)
    _logger.info(f"Attaching policy/AWSCloud9SSMInstanceProfile to {role_name}")
    iam_client.attach_role_policy(
        RoleName=role_name,
        PolicyArn=f"arn:{partition}:iam::aws:policy/AWSCloud9SSMInstanceProfile",
    )
def create_instance_profile(instance_profile_name: str) -> None:
    """Create the Cloud9 SSM instance profile; a pre-existing one is a no-op.

    The caller attaches the access role to the profile separately.
    """
    _logger.info(f"Creating instance profile {instance_profile_name}")
    try:
        iam_client.create_instance_profile(
            InstanceProfileName=instance_profile_name,
            Path="/cloud9/",
        )
    # Bug fix: CreateInstanceProfile raises EntityAlreadyExists, not
    # NoSuchEntity, and the old log message referred to the wrong resource
    # (copy-paste from check_access_role_exist).
    except iam_client.exceptions.EntityAlreadyExistsException:
        _logger.warning(f"The instance profile {instance_profile_name} already exists")
# When the Cloud9 environment uses SSM connectivity (the default), ensure the
# AWSCloud9SSMAccessRole and AWSCloud9SSMInstanceProfile exist and are wired
# together before the CDK deployment runs.
if os.getenv("SEEDFARMER_PARAMETER_CONNECTION_TYPE", "CONNECT_SSM") == "CONNECT_SSM":
    if not check_access_role_exist(role_name=role_name):
        create_access_role(role_name=role_name)
    instance_profile_exists, instance_profile_data = check_instance_profile_exist(
        instance_profile_name=instance_profile_name
    )
    if not instance_profile_exists:
        create_instance_profile(instance_profile_name=instance_profile_name)
    else:
        # Profile already exists: attach the role only when it is not already
        # attached (for-else runs when no matching role was found).
        for role in instance_profile_data.get("InstanceProfile", {}).get("Roles", []):
            if role["RoleName"] == role_name:
                break
        else:
            try:
                attach_role_to_instance_profile(instance_profile_name=instance_profile_name, role_name=role_name)
            except Exception as err:
                raise err
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/tests/test_stack.py | modules/workbench/cloud9/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/workbench/cloud9/tests/__init__.py | modules/workbench/cloud9/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/stack.py | modules/pre-processing/image-extraction/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from typing import Any, Optional, cast
import aws_cdk.aws_batch_alpha as batch
import aws_cdk.aws_ecr as ecr
import aws_cdk.aws_ecs as ecs
import aws_cdk.aws_iam as iam
from aws_cdk import Aspects, Duration, Stack, Tags
from aws_cdk import aws_events as events
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_stepfunctions as stepfunctions
from aws_cdk import aws_stepfunctions_tasks as step_functions_tasks
from aws_cdk.aws_ecr_assets import DockerImageAsset
from aws_solutions_constructs.aws_s3_stepfunctions import S3ToStepfunctions
from cdk_ecr_deployment import DockerImageName, ECRDeployment
from cdk_nag import AwsSolutionsChecks, NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class ImageExtraction(Stack):
    """CDK stack: S3-event-triggered Step Function that runs an AWS Batch image-extraction job.

    Builds/pushes the worker Docker image, defines the Batch job, and wires an
    EventBridge rule (S3 "Object Created" for *.jsq keys) through a Step
    Function that submits the job.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        repository_name: str,
        artifacts_bucket_name: str,
        platform: str,  # FARGATE or EC2
        retries: int,
        timeout_seconds: int,
        vcpus: int,
        memory_limit_mib: int,
        on_demand_job_queue_arn: str,
        start_range: Optional[str] = None,
        end_range: Optional[str] = None,
        **kwargs: Any,  # type: ignore
    ) -> None:
        """Create the stack.

        :param repository_name: existing ECR repository to push the worker image to
        :param artifacts_bucket_name: existing S3 bucket whose uploads trigger the pipeline
        :param platform: Batch platform capability, "FARGATE" or "EC2"
        :param on_demand_job_queue_arn: ARN of the Batch job queue to submit to
        :param start_range / end_range: currently unused (see commented-out
            container overrides below)
        """
        super().__init__(
            scope,
            id,
            **kwargs,
        )
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        # used to tag AWS resources. Tag Value length cant exceed 256 characters
        full_dep_mod = dep_mod[:256] if len(dep_mod) > 256 else dep_mod
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=full_dep_mod)
        # Build ImageExtraction Docker Image from ./src and copy it into the
        # pre-existing ECR repository under the :latest tag.
        repo = ecr.Repository.from_repository_name(
            self, id=full_dep_mod + repository_name, repository_name=repository_name
        )
        local_image = DockerImageAsset(
            self,
            "ImageExtractionDockerImage",
            directory=os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"),
        )
        image_uri = f"{repo.repository_uri}:latest"
        ECRDeployment(
            self,
            "ImageURI",
            src=DockerImageName(local_image.image_uri),
            dest=DockerImageName(image_uri),
        )
        # IAM policy for the Batch task: ECR access scoped to this module's
        # repositories, plus read/write on the project's S3 buckets.
        policy_statements = [
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["s3:ListAllMyBuckets"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ),
            iam.PolicyStatement(
                actions=["s3:ListBucket", "s3:GetBucketLocation"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:s3:::{project_name}-*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "s3:GetObject",
                    "s3:GetObjectAcl",
                    "s3:PutObject",
                    "s3:PutObjectAcl",
                    "s3:DeleteObject",
                ],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:s3:::{project_name}-*/*"],
            ),
        ]
        policy_document = iam.PolicyDocument(statements=policy_statements)
        # Single role used as both the job role and the execution role.
        role = iam.Role(
            self,
            f"{repository_name}-batch-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            ),
            inline_policies={"ExtractionPolicyDocument": policy_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
            ],
            max_session_duration=Duration.hours(12),
        )
        # Build ImageExtraction AWS Batch Job Definition
        self.batch_job = batch.JobDefinition(
            self,
            "batch-job-defintion-from-ecr",
            container=batch.JobDefinitionContainer(
                image=ecs.ContainerImage.from_ecr_repository(repo, "latest"),
                environment={
                    "AWS_DEFAULT_REGION": self.region,
                    "AWS_ACCOUNT_ID": self.account,
                    "DEBUG": "true",
                },
                job_role=role,
                execution_role=role,
                memory_limit_mib=memory_limit_mib,
                vcpus=vcpus,
                privileged=True,
            ),
            job_definition_name=repository_name,
            platform_capabilities=[
                batch.PlatformCapabilities.FARGATE if platform == "FARGATE" else batch.PlatformCapabilities.EC2
            ],
            retry_attempts=retries,
            timeout=Duration.seconds(timeout_seconds),
        )
        # Invoke AWS Batch in Step Functions context; bucket/key come from the
        # S3 EventBridge event payload at execution time.
        submit_image_extraction_job = step_functions_tasks.BatchSubmitJob(
            self,
            f"{dep_mod}-Batchjob",
            job_name=f"{project_name}-image-extraction-job",
            job_queue_arn=on_demand_job_queue_arn,
            job_definition_arn=self.batch_job.job_definition_arn,
            container_overrides=step_functions_tasks.BatchContainerOverrides(
                environment={
                    "ARTIFACTS_BUCKET": stepfunctions.JsonPath.string_at("$.detail.bucket.name"),
                    "KEY": stepfunctions.JsonPath.string_at("$.detail.object.key"),
                    # "START_RANGE": start_range,
                    # "END_RANGE": end_range,
                },
            ),
            state_name="Image Extraction Batch Job",
        )
        succeed_job = stepfunctions.Succeed(self, "Succeeded", comment="AWS Batch Job succeeded")
        # Create Chain
        definition = submit_image_extraction_job.next(succeed_job)
        # Trigger StepFunction for S3 Events: only "Object Created" events for
        # keys ending in ".jsq" on the artifacts bucket.
        S3ToStepfunctions(
            self,
            "S3ToStepFunctions",
            existing_bucket_obj=s3.Bucket.from_bucket_name(self, "importedbucket", bucket_name=artifacts_bucket_name),
            event_rule_props=events.RuleProps(
                event_pattern=events.EventPattern(
                    source=["aws.s3"],
                    detail_type=["Object Created"],
                    detail={
                        "bucket": {"name": [artifacts_bucket_name]},
                        "object": {"key": [{"suffix": ".jsq"}]},
                    },
                )
            ),
            state_machine_props=stepfunctions.StateMachineProps(
                definition=definition,
                state_machine_name=f"{deployment_name}-{module_name}-S3FileProcessing",
            ),
        )
        # Exposed for the app's metadata CfnOutput.
        self.role = role
        self.image_uri = image_uri
        Aspects.of(self).add(AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": f"Resource access restriced to {project_name} resources",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-SF1",
                        "reason": "Step Function does not need to log ALL events to CloudWatch Logs",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-SF2",
                        "reason": "Step Function does not need to have X Ray tracing enabled",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/app.py | modules/pre-processing/image-extraction/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import ImageExtraction
# SeedFarmer project/deployment/module identity (injected by seedfarmer).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Tunables with defaults.
retries = int(os.getenv(_param("RETRIES"), 1))
timeout_seconds = int(os.getenv(_param("TIMEOUT_SECONDS"), 60))  # optional
# start_range = os.getenv(_param("START_RANGE"))  # optional
# end_range = os.getenv(_param("END_RANGE"))  # optional
vcpus = int(os.getenv(_param("VCPUS"), 4))
memory_limit_mib = int(os.getenv(_param("MEMORY_MIB"), 16384))
on_demand_job_queue_arn = os.getenv(_param("ON_DEMAND_JOB_QUEUE_ARN"))  # required
repository_name = os.getenv(_param("REPOSITORY_NAME"))  # required
artifacts_bucket_name = os.getenv(_param("ARTIFACTS_BUCKET_NAME"))
platform = os.getenv(_param("PLATFORM"), "EC2")

# Validate required parameters before synthesizing anything.
if not on_demand_job_queue_arn:
    raise ValueError("Batch Queue Configuration is missing.")
if not repository_name:
    raise ValueError("ECR Repository Name is missing.")
if not artifacts_bucket_name:
    raise ValueError("S3 Bucket is missing.")
if platform not in ["FARGATE", "EC2"]:
    raise ValueError("Platform must be either FARGATE or EC2")

app = App()

stack = ImageExtraction(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    retries=retries,
    timeout_seconds=timeout_seconds,
    vcpus=vcpus,
    memory_limit_mib=memory_limit_mib,
    repository_name=repository_name,
    artifacts_bucket_name=artifacts_bucket_name,
    on_demand_job_queue_arn=on_demand_job_queue_arn,
    platform=platform,
    # start_range=start_range,
    # end_range=end_range,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)

# Publish the Batch role ARN and pushed image URI as SeedFarmer module metadata.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "BatchExecutionRoleArn": stack.role.role_arn,
            "ImageExtractionDkrImageUri": stack.image_uri,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/src/extract_images_from_jseq.py | modules/pre-processing/image-extraction/src/extract_images_from_jseq.py | """
This python script will extract images from a pair of JSEQ + IDX file
Assumption: Input .jsq + .idx file with the same name
Please note we default to save as .jpg extension as it's the raw picture
"""
import os
import sys
import time
class JseqHandler:
    """Random access to frames stored in a .jsq file via its .idx index.

    The .idx file is a flat array of unsigned 64-bit little-endian byte
    offsets into the .jsq file; frame *i* spans indexes[i] .. indexes[i+1]
    (or to end-of-file for the last entry).
    """

    def __init__(self, jsq_loc_in: str, idx_loc_in: str):
        """Load all frame offsets from the index file and open the .jsq file."""
        self.jseq_file_location = jsq_loc_in
        self.idx_file_location = idx_loc_in
        self.jseq_file_reader = open(self.jseq_file_location, "rb")
        self.indexes = []
        t0_init = time.time()
        # Fix: use a context manager so the index file is closed even if an
        # exception interrupts the read loop (the old code leaked the handle
        # on error).
        with open(self.idx_file_location, "rb") as index_file_reader:
            index_location = index_file_reader.read(8)
            while index_location != b"":
                index = int.from_bytes(index_location, byteorder="little", signed=False)
                self.indexes.append(index)
                index_location = index_file_reader.read(8)
        t1_init = time.time()
        print(f"Elapsed time reading IDX File: {str(t1_init-t0_init)}")

    def get_frame(self, frame_id: int):
        """Return the raw bytes of frame *frame_id*."""
        t0_frame = time.time()
        start_read_location = self.indexes[frame_id]
        if frame_id + 1 == len(self.indexes):
            # Last frame: read(-1) reads to end of file.
            number_of_bytes_to_be_read = -1
        else:
            number_of_bytes_to_be_read = self.indexes[frame_id + 1] - self.indexes[frame_id]
        self.jseq_file_reader.seek(start_read_location, 0)
        output = self.jseq_file_reader.read(number_of_bytes_to_be_read)
        t1_frame = time.time()
        print(f"Elapsed time Single IDX File: {str(t1_frame-t0_frame)}")
        return output

    def __del__(self):
        # Fix: guard against __init__ failing before jseq_file_reader was set
        # (the old code could raise AttributeError during garbage collection).
        reader = getattr(self, "jseq_file_reader", None)
        if reader is not None and not reader.closed:
            reader.close()
if __name__ == "__main__":
    # Entry Point:
    # - argv[1]: directory containing the JSEQ + IDX file pair (same basename)
    # - argv[2]: directory where processed originals should be moved
    # - argv[3]: directory where the extracted images shall be stored
    mypath = sys.argv[1]
    processed = sys.argv[2]
    extracted = sys.argv[3]
    # start_range = sys.argv[4]
    # end_range = sys.argv[5]
    # List the files in the provided directory and looking for the .JSQ and .IDX file
    # We assume they have the same name
    for path in os.listdir(mypath):
        full_path_ext = os.path.join(mypath, path)
        # Only process each jsq / idx file pair once...
        if full_path_ext.endswith(".jsq"):
            print(f"Working on file {full_path_ext}")
            # Remove extension
            file_name = os.path.splitext(full_path_ext)[0]
            jsq_loc = file_name + ".jsq"
            idx_loc = file_name + ".idx"
            # Fix: use boolean `and` instead of bitwise `&` on booleans.
            if os.path.isfile(jsq_loc) and os.path.isfile(idx_loc):
                # Instantiate the JSEQ class so it loads the index of the frames
                jseq = JseqHandler(jsq_loc, idx_loc)
                # Iterate each frame and extract and save them as individual frames and save to .jpg
                print(f"-- About to start extracting {str(len(jseq.indexes) - 2)} images to {extracted}: ")
                t0 = time.time()
                # start_range = start_range if start_range else 0
                # end_range = end_range if end_range else int(len(jseq.indexes) - 2)
                start_range = 0
                end_range = int(len(jseq.indexes) - 2)
                if end_range > len(jseq.indexes) - 2 or end_range < start_range:
                    end_range = len(jseq.indexes) - 2
                    start_range = 0
                for idx in range(start_range, end_range):
                    print(f"-- Extracting frame with id {str(idx)}")
                    image_frame = jseq.get_frame(idx)
                    image_name = f"extract{str(idx)}"
                    # Fix: join the output directory properly instead of bare
                    # string concatenation, which silently produced wrong paths
                    # when `extracted` lacked a trailing separator.
                    with open(os.path.join(extracted, f"{image_name}.jpg"), "wb") as extractedImage:
                        extractedImage.write(image_frame)
                    print(f"-- Just finished extracting with frame id: {str(idx)}")
                t1 = time.time()
                print(f"Elapsed time extracting all images File: {str(t1-t0)}")
        else:
            print(f"-- Skipping file {full_path_ext}")
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/tests/test_app.py | modules/pre-processing/image-extraction/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the environment variables the CDK app expects before each test."""
    defaults = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN": "arn:aws:batch:XXX:111111111111:job-queue/demo-XXXX",
        "SEEDFARMER_PARAMETER_MEMORY_LIMIT_MIB": "16384",
        "SEEDFARMER_PARAMETER_PLATFORM": "EC2",
        "SEEDFARMER_PARAMETER_RETRIES": "1",
        "SEEDFARMER_PARAMETER_TIMEOUT_SECONDS": "1800",
        "SEEDFARMER_PARAMETER_VCPUS": "2",
        "SEEDFARMER_PARAMETER_ARTIFACTS_BUCKET_NAME": "artifacts-bucket",
        "SEEDFARMER_PARAMETER_REPOSITORY_NAME": "test-repo",
    }
    os.environ.update(defaults)
    # Unload the app import so that subsequent tests don't reuse
    sys.modules.pop("app", None)
def test_app(stack_defaults):
    """Smoke test: with all parameters present, importing app must not raise."""
    import app  # noqa: F401
def test_job_queue_arn(stack_defaults):
    """Importing the app must fail when the job queue ARN parameter is missing."""
    del os.environ["SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN"]
    with pytest.raises(ValueError, match="Batch Queue Configuration is missing."):
        import app  # noqa: F401
    # Bug fix: the old trailing assertion re-read the variable that was just
    # deleted, which raised KeyError and broke the test. Assert absence instead.
    assert "SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN" not in os.environ
def test_art_buckets_name(stack_defaults):
    """Importing the app must fail when the artifacts bucket parameter is missing."""
    del os.environ["SEEDFARMER_PARAMETER_ARTIFACTS_BUCKET_NAME"]
    with pytest.raises(ValueError, match="S3 Bucket is missing."):
        import app  # noqa: F401
    # Bug fix: the old trailing assertion re-read the variable that was just
    # deleted, which raised KeyError and broke the test. Assert absence instead.
    assert "SEEDFARMER_PARAMETER_ARTIFACTS_BUCKET_NAME" not in os.environ
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/tests/test_stack.py | modules/pre-processing/image-extraction/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the environment the stack module expects and reset its import."""
    defaults = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
    }
    os.environ.update(defaults)
    # Drop any cached import so each test sees a fresh stack module.
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults):
    """Synthesize the ImageExtraction stack and assert the expected resources exist."""
    import stack

    app = cdk.App()
    deployment_name = "test-deployment"
    module_name = "test-module"
    project_name = "test-project"
    image_extraction_stack = stack.ImageExtraction(
        scope=app,
        id=f"{project_name}-{deployment_name}-{module_name}",
        project_name=project_name,
        deployment_name=deployment_name,
        module_name=module_name,
        platform="EC2",
        retries=1,
        timeout_seconds=1800,
        vcpus=2,
        memory_limit_mib=8192,
        repository_name="test-repo",
        artifacts_bucket_name="artifacts-bucket",
        on_demand_job_queue_arn="arn:aws:batch:XXX:111111111111:job-queue/demo-XXXX",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(image_extraction_stack)
    # One state machine, one S3-event rule, one Batch job definition.
    template.resource_count_is("AWS::StepFunctions::StateMachine", 1)
    template.resource_count_is("AWS::Events::Rule", 1)
    template.resource_count_is("AWS::Batch::JobDefinition", 1)
    # Check ecr custom resource runtime version
    template.has_resource_properties(
        type="AWS::Lambda::Function",
        props={
            "Runtime": "provided.al2023",
        },
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/pre-processing/image-extraction/tests/__init__.py | modules/pre-processing/image-extraction/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-k8s-deployment/stack.py | modules/visualization/dcv-k8s-deployment/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from string import Template
from typing import Any, Optional, cast
import yaml
from aws_cdk import Environment, Stack, Tags
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from aws_cdk import aws_ssm as ssm
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
# Directory containing this module (used to locate the k8s/ manifest templates).
project_dir = os.path.dirname(os.path.abspath(__file__))

# Host path where DCV display sockets are shared with application pods.
ADDF_DISPLAY_SOCKET_PATH = "/var/addf/dcv-eks/sockets"
# Default DISPLAY value advertised to applications. (Name keeps the original
# "NUMER" spelling since it is part of the module's public surface.)
ADDF_DEFAULT_DISPLAY_NUMER = ":0"
# Leaf names of the SSM parameters created under /<project>/<deployment>/<module>/.
ADDF_SSM_PARAMETER_STORE_DISPLAY_NAME = "dcv-display"
ADDF_SSM_PARAMETER_STORE_MOUNT_PATH_NAME = "dcv-socket-mount-path"
class DcvEksStack(Stack):
    """CDK stack that deploys the DCV agent onto an existing EKS cluster.

    Renders the k8s/*.yaml templates into cluster manifests, creates the SSM
    parameters and IAM roles the DCV pods need, and widens the node role so
    instances can fetch the DCV license from S3.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        dcv_namespace: str,
        dcv_image_uri: str,
        eks_cluster_name: str,
        eks_cluster_admin_role_arn: str,
        eks_handler_role_arn: str,
        eks_oidc_arn: str,
        eks_cluster_open_id_connect_issuer: str,
        eks_cluster_security_group_id: str,
        eks_node_role_arn: str,
        fsx_pvc_name: str,
        env: Environment,
        **kwargs: Any,
    ) -> None:
        """Create the stack; all EKS inputs reference a cluster provisioned elsewhere."""
        super().__init__(scope, id, **kwargs)
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        dep_mod = f"{self.project_name}-{self.deployment_name}-{self.module_name}"
        # Truncate for use in construct ids.
        dep_mod = dep_mod[:64]
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=dep_mod)
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        # create parameter store names
        parameter_store_prefix = f"/{self.project_name}/{self.deployment_name}/{self.module_name}"
        self.display_parameter_name = f"{parameter_store_prefix}/{ADDF_SSM_PARAMETER_STORE_DISPLAY_NAME}"
        self.socket_mount_path_parameter_name = f"{parameter_store_prefix}/{ADDF_SSM_PARAMETER_STORE_MOUNT_PATH_NAME}"
        self.add_ssm_parameter_store(self.display_parameter_name, self.socket_mount_path_parameter_name)
        self.update_node_role_permissions(eks_node_role_arn, env.region)
        self.eks_admin_role = self.add_eks_dcv_role(
            eks_cluster_open_id_connect_issuer,
            eks_oidc_arn,
            parameter_store_prefix,
            env,
        )
        # Import the existing cluster so manifests can be applied via kubectl.
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_cluster_admin_role_arn,
            kubectl_lambda_role=iam.Role.from_role_arn(self, "KubectlHandlerArn", eks_handler_role_arn),
            kubectl_layer=KubectlV29Layer(self, "KubectlV29Layer"),
            open_id_connect_provider=provider,
        )
        # Render and apply the DCV agent deployment manifest.
        t = Template(open(os.path.join(project_dir, "k8s/dcv-deployment.yaml"), "r").read())
        dcv_agent_yaml_file = t.substitute(
            NAMESPACE=dcv_namespace,
            IMAGE=dcv_image_uri,
            REGION=env.region,
            # SOCKET_PATH=ADDF_DISPLAY_SOCKET_PATH,
            # DISPLAY_PARAMETER_NAME=self.display_parameter_name,
            FSX_PVC_NAME=fsx_pvc_name,
        )
        dcv_agent_yaml = yaml.safe_load(dcv_agent_yaml_file)
        # Manifest ids must be unique; a counter keeps them distinct.
        loop_iteration = 0
        manifest_id = "DCVAgent" + str(loop_iteration)
        loop_iteration += 1
        dcv_agent_resource = eks_cluster.add_manifest(manifest_id, dcv_agent_yaml)
        # Permissions manifests (multi-document YAML) must exist before the
        # agent deployment, hence the dependency edges below.
        t = Template(open(os.path.join(project_dir, "k8s/dcv-permissions-setup.yaml"), "r").read())
        dcv_agent_yaml_file = t.substitute(
            NAMESPACE=dcv_namespace,
            RUNTIME_ROLE_ARN=self.eks_admin_role.role_arn,
            SOCKET_PATH=ADDF_DISPLAY_SOCKET_PATH,
        )
        dcv_agent_yaml = list(yaml.safe_load_all(dcv_agent_yaml_file))
        for value in dcv_agent_yaml:
            loop_iteration = loop_iteration + 1
            manifest_id = "DCVAgent" + str(loop_iteration)
            k8s_resource = eks_cluster.add_manifest(manifest_id, value)
            dcv_agent_resource.node.add_dependency(k8s_resource)
        # Network policy for the namespace; also applied before the agent.
        t = Template(open(os.path.join(project_dir, "k8s/dcv-network-policy.yaml"), "r").read())
        dcv_network_policy_yaml_file = t.substitute(NAMESPACE=dcv_namespace)
        dcv_network_policy_yaml = yaml.safe_load(dcv_network_policy_yaml_file)
        loop_iteration += 1
        manifest_id = "DCVAgent" + str(loop_iteration)
        network_policy_resource = eks_cluster.add_manifest(manifest_id, dcv_network_policy_yaml)
        dcv_agent_resource.node.add_dependency(network_policy_resource)
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for src account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to resources",
                    }
                ),
            ],
        )

    def add_ssm_parameter_store(self, display_parameter_name: str, socket_mount_parameter_name: str) -> None:
        """Create the SSM parameters that application pods read at startup."""
        ssm.StringParameter(
            self,
            "display-parameter",
            description="DISPLAY environment variable for application pods",
            parameter_name=display_parameter_name,
            string_value=ADDF_DEFAULT_DISPLAY_NUMER,
        )
        ssm.StringParameter(
            self,
            "shared-dir-parameter",
            description="Shared directory for application access display socket",
            parameter_name=socket_mount_parameter_name,
            string_value=ADDF_DISPLAY_SOCKET_PATH,
        )

    def add_eks_dcv_role(
        self,
        eks_cluster_open_id_connect_issuer: str,
        eks_oidc_arn: str,
        ssm_parameter_prefix: str,
        env: Environment,
    ) -> iam.Role:
        """Create the IRSA role assumed by DCV service accounts via the cluster OIDC provider."""
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.FederatedPrincipal(
                eks_oidc_arn,
                # Any service account in the cluster may assume this role.
                {"StringLike": {f"{eks_cluster_open_id_connect_issuer}:sub": "system:serviceaccount:*"}},
                "sts:AssumeRoleWithWebIdentity",
            ),
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess")],
        )
        # Read access to the DCV credentials secret.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "secretsmanager:GetSecretValue",
                    "secretsmanager:DescribeSecret",
                ],
                resources=[f"arn:{self.partition}:secretsmanager:{env.region}:{env.account}:secret:dcv-cred*"],
            )
        )
        # Read/write access to this module's dcv-* SSM parameters.
        role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ssm:DescribeParameters",
                    "ssm:PutParameter",
                    "ssm:GetParameter",
                ],
                resources=[
                    f"arn:{self.partition}:ssm:{env.region}:{env.account}:parameter{ssm_parameter_prefix}/dcv-*"
                ],
            )
        )
        return role

    def update_node_role_permissions(self, eks_node_role_arn: str, region: Optional[str]) -> None:
        """Allow EKS worker nodes to fetch the regional DCV license object from S3."""
        node_role = iam.Role.from_role_arn(self, "NodeRole", eks_node_role_arn)
        node_role.add_to_principal_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:GetObject",
                ],
                resources=[f"arn:{self.partition}:s3:::dcv-license.{region}/*"],
            )
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-k8s-deployment/app.py | modules/visualization/dcv-k8s-deployment/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
from aws_cdk import App, CfnOutput, Environment
from stack import DcvEksStack
# Project specific
project_name = os.getenv("SEEDFARMER_PROJECT_NAME")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME")
module_name = os.getenv("SEEDFARMER_MODULE_NAME")

# Default Kubernetes namespace for the DCV deployment.
DEFAULT_DCV_NAMESPACE = "dcv"

# Derived resource names embed "<project>-<deployment>"; keep it short enough
# for downstream AWS name-length limits.
# NOTE(review): the condition rejects lengths > 36 while the message says
# "greater than 35" — confirm the intended limit.
if len(f"{project_name}-{deployment_name}") > 36:
    raise ValueError("This module cannot support a project+deployment name character length greater than 35")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters. All read via seed-farmer's SEEDFARMER_PARAMETER_* convention;
# the empty-string defaults mean "not provided" and are validated by the stack.
dcv_namespace = os.getenv(_param("DCV_NAMESPACE"), DEFAULT_DCV_NAMESPACE)
dcv_image_uri = os.getenv(_param("DCV_IMAGE_URI"), "")
eks_cluster_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")
eks_handler_role_arn = os.getenv(_param("EKS_HANDLER_ROLE_ARN"), "")
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")
eks_cluster_open_id_connect_issuer = os.getenv(_param("EKS_CLUSTER_OPEN_ID_CONNECT_ISSUER"), "")
eks_cluster_security_group_id = os.getenv(_param("EKS_CLUSTER_SECURITY_GROUP_ID"), "")
eks_node_role_arn = os.getenv(_param("EKS_NODE_ROLE_ARN"), "")
fsx_pvc_name = os.getenv(_param("FSX_PVC_NAME"), "")

app = App()

# Stack id is <project>-<deployment>-<module>; account/region come from the CDK env.
dcv_eks_stack = DcvEksStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=cast(str, project_name),
    deployment_name=cast(str, deployment_name),
    module_name=cast(str, module_name),
    dcv_namespace=dcv_namespace,
    dcv_image_uri=dcv_image_uri,
    eks_cluster_name=eks_cluster_name,
    eks_cluster_admin_role_arn=eks_cluster_admin_role_arn,
    eks_handler_role_arn=eks_handler_role_arn,
    eks_oidc_arn=eks_oidc_arn,
    eks_cluster_open_id_connect_issuer=eks_cluster_open_id_connect_issuer,
    eks_cluster_security_group_id=eks_cluster_security_group_id,
    eks_node_role_arn=eks_node_role_arn,
    fsx_pvc_name=fsx_pvc_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)

# Emit deployment metadata (role ARN, namespace, SSM parameter names) for
# seed-farmer to consume from the CloudFormation output.
CfnOutput(
    scope=dcv_eks_stack,
    id="metadata",
    value=dcv_eks_stack.to_json_string(
        {
            "DcvEksRoleArn": dcv_eks_stack.eks_admin_role.role_arn,
            "DcvNamespace": dcv_namespace,
            "DcvDisplayParameterName": dcv_eks_stack.display_parameter_name,
            "DcvSocketMountPathParameterName": dcv_eks_stack.socket_mount_path_parameter_name,
        }
    ),
)

app.synth()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-k8s-deployment/tests/test_app.py | modules/visualization/dcv-k8s-deployment/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the environment the CDK app expects, then force a fresh `app` import.

    Bug fix: app.py reads SEEDFARMER_PARAMETER_DCV_IMAGE_URI, but this fixture
    previously exported SEEDFARMER_PARAMETER_DCV_IMAGE_REPO_URI, so the image
    URI silently fell back to its "" default in every test.
    """
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "12345678"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-2"
    os.environ["SEEDFARMER_PARAMETER_DCV_NAMESPACE"] = "dcv"
    # Key must match app.py's _param("DCV_IMAGE_URI") lookup.
    os.environ["SEEDFARMER_PARAMETER_DCV_IMAGE_URI"] = "1234567890.dkr.ecr.us-east-1.amazonaws.com/test-repo"
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN"] = "arn:aws:iam:us-east-1:1234567890:role/test-role"
    os.environ["SEEDFARMER_PARAMETER_EKS_HANDLER_ROLE_ARN"] = "arn:aws:iam:us-east-1:1234567890:role/k8s-handler-role"
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME"] = "test_cluster"
    os.environ["SEEDFARMER_PARAMETER_EKS_OIDC_ARN"] = (
        "arn:aws:eks:us-east-1:1234567890:oidc-provider/oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/test-ocid"
    )
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_OPEN_ID_CONNECT_ISSUER"] = "test_open_id_connect_issuer"
    os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_SECURITY_GROUP_ID"] = "sg-12345678"
    os.environ["SEEDFARMER_PARAMETER_EKS_NODE_ROLE_ARN"] = "arn:us-east-1:iam:us-east-1:1234567890:role/test-role"
    # Drop any previously imported `app` so its module-level code re-runs per test.
    sys.modules.pop("app", None)
def test_app(stack_defaults):
    """Smoke test: the CDK app module imports (and synthesizes) without error."""
    import app  # noqa: F401
def test_project_deployment_name_length(stack_defaults):
    """An over-long project+deployment name must abort the app import with ValueError."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project-incredibly"
    # app.py raises ValueError for the length check, so catch that specifically
    # instead of the blanket Exception.
    with pytest.raises(ValueError) as excinfo:
        import app  # noqa: F401
    # Assert on the raised exception itself: str(excinfo) is the ExceptionInfo
    # wrapper's repr, whose content pytest does not guarantee.
    assert "module cannot support a project+deployment name character length greater than" in str(excinfo.value)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-k8s-deployment/tests/test_stack.py | modules/visualization/dcv-k8s-deployment/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk import Environment
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Pin the CDK account/region and force `stack` to be re-imported per test."""
    os.environ.update(
        {
            "CDK_DEFAULT_ACCOUNT": "1234567890",
            "CDK_DEFAULT_REGION": "us-east-1",
        }
    )
    # Unload the module so subsequent tests don't reuse a cached import.
    sys.modules.pop("stack", None)
def test_app(stack_defaults):
    """Synthesize DcvEksStack and verify resource counts in the template."""
    # Fix: the stack instance used to be assigned to the name `stack`, shadowing
    # the imported module; bind the module and the instance to distinct names.
    import stack as stack_module

    app = cdk.App()
    dcv_stack = stack_module.DcvEksStack(
        scope=app,
        id="test-proj",
        project_name="test_proj",
        deployment_name="test_deploy",
        module_name="test_module",
        dcv_namespace="dcv",
        dcv_image_uri="docker.ecr.test_image_uri:dcv-v1.0.0",
        eks_cluster_name="test_cluster",
        eks_cluster_admin_role_arn="arn:aws:iam:us-east-1:1234567890:role/test-role",
        eks_oidc_arn="arn:aws:eks:us-east-1:1234567890:oidc-provider/oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/test-ocid",
        eks_handler_role_arn="arn:aws:iam:us-east-1:1234567890:role/k8s-handler-role",
        eks_cluster_open_id_connect_issuer="test_open_id_connect_issuer",
        eks_cluster_security_group_id="sg-1234567890",
        eks_node_role_arn="arn:aws:iam:us-east-1:1234567890:role/test-role",
        fsx_pvc_name="test-fsx-pvc",
        env=Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(dcv_stack)
    template.resource_count_is("AWS::IAM::Role", 1)
    template.resource_count_is("AWS::IAM::Policy", 3)
    template.resource_count_is("Custom::AWSCDK-EKS-KubernetesResource", 7)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-k8s-deployment/tests/__init__.py | modules/visualization/dcv-k8s-deployment/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance/stack.py | modules/visualization/dev-instance/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
import json
import os
from typing import cast
import aws_cdk.aws_secretsmanager as secretsmanager
from aws_cdk import Environment, Stack, Tags, aws_iam
from aws_cdk.aws_ec2 import (
BlockDevice,
BlockDeviceVolume,
EbsDeviceVolumeType,
Instance,
InstanceType,
MachineImage,
OperatingSystemType,
Peer,
Port,
SecurityGroup,
SubnetSelection,
SubnetType,
UserData,
Vpc,
)
from constructs import Construct, IConstruct
default_ami_ssm_parameter_name: str = (
"/aws/service/canonical/ubuntu/server/focal/stable/current/amd64/hvm/ebs-gp2/ami-id"
)
class DataServiceDevInstancesStack(Stack):
    """Provision NICE-DCV-capable EC2 dev instances with per-instance Ubuntu passwords.

    For each instance a Secrets Manager secret holds the `ubuntu` user's
    password, and the instance's user data is rendered from a shared template
    with that secret's name substituted in.

    Bug fix: the user-data template string used to be overwritten *inside* the
    instance loop, so after iteration 0 the `$DATA_SERVICE_USER_SECRET_NAME_REF`
    placeholder was gone and every later instance inherited instance 0's secret
    name. The template is now kept pristine and each instance renders its own
    copy (matching the dev-instance-foxbox variant).
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        env: Environment,
        project_name: str,
        deployment_name: str,
        module_name: str,
        vpc_id: str,
        instance_count: int = 1,
        ami_id: str = None,
        instance_type: str = "g4dn.xlarge",
        ebs_volume_size: int = 200,
        ebs_encrypt: bool = True,
        ebs_delete_on_termination: bool = True,
        ebs_volume_type: EbsDeviceVolumeType = EbsDeviceVolumeType.GP3,
        demo_password: str = None,
        s3_dataset_bucket: str = None,
        s3_script_bucket: str = None,
        **kwargs,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="(SO9154) Autonomous Driving Data Framework (ADDF) - dev-instance",
            env=env,
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="DeploymentName", value=f"{project_name}-{deployment_name}")

        partition = Stack.of(self).partition
        account = Stack.of(self).account
        region = Stack.of(self).region
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"

        ####################################################################################################
        # VPC / AMI
        ####################################################################################################
        vpc = Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

        if ami_id is not None:
            # NOTE(review): MachineImage.lookup matches by AMI *name*, not AMI id —
            # confirm callers pass a name pattern here despite the parameter name.
            ami = MachineImage.lookup(name=ami_id)
        else:
            ami = MachineImage.from_ssm_parameter(default_ami_ssm_parameter_name, os=OperatingSystemType.LINUX)

        # Security group: expose the NICE DCV port (8443) to the world.
        instance_sg = SecurityGroup(
            self,
            id="instance-sg",
            security_group_name=f"{module_name}-dev-instance-sg",
            allow_all_outbound=True,
            vpc=vpc,
        )
        instance_sg.add_ingress_rule(
            Peer.any_ipv4(),
            Port.tcp(8443),
            "allow 8443 access everywhere",
        )

        # Read the user-data template ONCE and keep it unmodified; each instance
        # renders its own copy in the loop below.
        with open(os.path.join("user-data", "script.sh"), "r") as f:
            user_data_template = f.read()

        # Instance role: SSM management plus the custom policies below.
        dev_instance_role = aws_iam.Role(
            self,
            "dev-instance-role",
            assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore")],
        )

        # Allow reading this stack's own CloudFormation description.
        cloudformation_policy_json = {
            "Effect": "Allow",
            "Action": ["cloudformation:DescribeStacks"],
            "Resource": [Stack.of(self).stack_id],
        }
        # DCV license check reads from the AWS-managed regional license bucket.
        dcv_license_policy_json = {
            "Effect": "Allow",
            "Action": ["s3:GetObject"],
            "Resource": [f"arn:{partition}:s3:::dcv-license.{region}/*"],
        }
        # Read/write access to the project's S3 buckets.
        s3_policy_json = {
            "Effect": "Allow",
            "Action": ["s3:Get*", "s3:List*", "s3:PutObject*", "s3:DeleteObject*"],
            "Resource": [f"arn:{partition}:s3:::{project_name}*", f"arn:{partition}:s3:::{project_name}*/*"],
        }
        # Invoke the project's Lambda functions (e.g. presigned-URL generator).
        lambda_policy_json = {
            "Effect": "Allow",
            "Action": ["lambda:Invoke*"],
            "Resource": [f"arn:{partition}:lambda:{region}:{account}:function:{project_name}-*"],
        }
        if s3_dataset_bucket:
            # Optional read-only access to a public dataset bucket.
            public_aev_dataset_policy_json = {
                "Effect": "Allow",
                "Action": ["s3:Get*", "s3:List*"],
                "Resource": [
                    f"arn:{partition}:s3:::{s3_dataset_bucket}",
                    f"arn:{partition}:s3:::{s3_dataset_bucket}/*",
                ],
            }

        # Add the policies to the role
        dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(cloudformation_policy_json))
        dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(dcv_license_policy_json))
        dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(s3_policy_json))
        dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(lambda_policy_json))
        if s3_dataset_bucket:
            dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(public_aev_dataset_policy_json))

        i_output = {}
        for idx in range(0, instance_count):
            instance_name = f"dev-instance-{idx}"

            # Per-instance Ubuntu password: generated unless a demo password is given.
            secret = secretsmanager.Secret(
                self,
                f"Secret-{idx}",
                description=f"Ubuntu password for {instance_name}",
                secret_name=f"{dep_mod}-{idx}-ubuntu-password",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({"username": "ubuntu"}),
                    generate_string_key="password",
                    exclude_punctuation=True,
                    include_space=False,
                    exclude_characters="',. |<>=/\"\\\\$#;@[]{}~:`",
                    password_length=24,
                )
                if not demo_password
                else None,
                # NOTE(review): SecretStringValueBeta1 is a deprecated CDK API —
                # consider secret_object_value as in the foxbox module.
                secret_string_beta1=secretsmanager.SecretStringValueBeta1.from_unsafe_plaintext(
                    json.dumps({"username": "ubuntu", "password": demo_password})
                )
                if demo_password
                else None,
            )
            secret.grant_read(dev_instance_role)

            # Render THIS instance's user data from the untouched template so each
            # instance references its own secret.
            user_data_script = user_data_template.replace("$DATA_SERVICE_USER_SECRET_NAME_REF", secret.secret_name)
            if s3_script_bucket:
                user_data_script = user_data_script.replace("$S3_SCRIPT_BUCKET", s3_script_bucket)
                user_data_script = user_data_script.replace("$SCRIPT_PATH", f"{deployment_name}-{module_name}/scripts/")

            # Kept for backward compatibility: holds the LAST iteration's secret.
            self.secret = secret

            instance = Instance(
                self,
                id=instance_name,
                machine_image=ami,
                instance_type=InstanceType(instance_type_identifier=instance_type),
                block_devices=[
                    BlockDevice(
                        device_name="/dev/sda1",
                        volume=BlockDeviceVolume.ebs(
                            encrypted=ebs_encrypt,
                            delete_on_termination=ebs_delete_on_termination,
                            volume_size=ebs_volume_size,
                            volume_type=ebs_volume_type,
                        ),
                    )
                ],
                vpc=vpc,
                user_data=UserData.custom(user_data_script),
                role=dev_instance_role,
                vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC),
                security_group=instance_sg,
            )
            # Kept for backward compatibility: holds the LAST iteration's instance.
            self.instance = instance

            i_output[instance_name] = {
                "DevInstanceURL": f"https://{instance.instance_public_dns_name}:8443",
                "AWSSecretName": secret.secret_name,
            }

        # Per-instance connection info, surfaced as a CloudFormation output by app.py.
        self.output_instances = i_output
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance/app.py | modules/visualization/dev-instance/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
import json
import os
from aws_cdk import App, CfnOutput, Environment
from stack import DataServiceDevInstancesStack
# Seed-farmer project context (empty strings when run outside seed-farmer).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters; only VPC_ID has no default and is effectively required.
vpc_id = os.getenv(_param("VPC_ID"))
instance_type = os.getenv(_param("INSTANCE_TYPE"), "g4dn.xlarge")
instance_count = int(os.getenv(_param("INSTANCE_COUNT"), "1"))
ami_id = os.getenv(_param("AMI_ID"), None)
s3_dataset_bucket = os.getenv(_param("S3_DATASET_BUCKET"), None)
s3_script_bucket = os.getenv(_param("S3_SCRIPT_BUCKET"), None)
demo_password = os.getenv(_param("DEMO_PASSWORD"), None)

# Fall back to a generic stack id when not driven by seed-farmer.
stack_id = "data-src-dev-instances"
if deployment_name and module_name:
    stack_id = f"{project_name}-{deployment_name}-{module_name}"

app = App()

# CDK_DEPLOY_* override the default CDK environment when present.
env = Environment(
    account=os.environ.get("CDK_DEPLOY_ACCOUNT", os.environ["CDK_DEFAULT_ACCOUNT"]),
    region=os.environ.get("CDK_DEPLOY_REGION", os.environ["CDK_DEFAULT_REGION"]),
)

stack = DataServiceDevInstancesStack(
    scope=app,
    id=stack_id,
    env=env,
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    vpc_id=vpc_id,
    instance_count=instance_count,
    instance_type=instance_type,
    ami_id=ami_id,
    demo_password=demo_password,
    s3_dataset_bucket=s3_dataset_bucket,
    s3_script_bucket=s3_script_bucket,
)

# Surface per-instance connection info (URL + secret name) as a stack output.
CfnOutput(scope=stack, id="metadata", value=json.dumps(stack.output_instances))

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance/scripts/get_url.py | modules/visualization/dev-instance/scripts/get_url.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# type: ignore
import json
import sys
from argparse import ArgumentParser
import boto3
def main():
    """Request a presigned URL for a rosbag object from the generate-URL Lambda.

    Inputs come either from a JSON config file (``--config-file``, ``-`` for
    STDIN) or from individual CLI flags; flags take precedence. Raises
    ValueError (an Exception subclass, so existing callers are unaffected)
    when required inputs are missing.
    """
    parser = ArgumentParser(description="Request a Presigned URL from the generateUrlLambda")
    parser.add_argument(
        "--config-file",
        dest="config_file",
        required=False,
        help='Name of the JSON file with Module\'s Metadata, use "-" to read from STDIN',
    )
    parser.add_argument(
        "--bucket-name",
        dest="bucket_name",
        required=False,
        help="the name of the bucket containing rosbag file, required if no --config-file is provided",
    )
    parser.add_argument(
        "--function-name",
        dest="function_name",
        required=False,
        help="The generateUrlFunctionName, required if no --config-file is provided",
    )
    parser.add_argument("--key", dest="object_key", required=False, help="the key of the object in s3")
    parser.add_argument(
        "--record",
        dest="record_id",
        required=False,
        help="the partition key of the scene in the scenario db",
    )
    parser.add_argument(
        "--scene",
        dest="scene_id",
        required=False,
        help="the sort key of the scene in the scenario db",
    )
    args = parser.parse_args()

    # Load optional JSON metadata; "-" means read the document from STDIN.
    if args.config_file is not None:
        if args.config_file == "-":
            metadata = json.load(sys.stdin)
        else:
            with open(args.config_file) as metadata_file:
                metadata = json.load(metadata_file)
    else:
        metadata = {}

    # CLI flags win over config-file values. Raise ValueError (was a bare
    # Exception) so callers can distinguish bad input from unexpected failures.
    bucket_name = args.bucket_name if args.bucket_name is not None else metadata.get("TargetBucketName", None)
    if bucket_name is None:
        raise ValueError('One of JSON config file key "TargetBucketName" or --bucket-name must be provided')
    function_name = (
        args.function_name if args.function_name is not None else metadata.get("GenerateUrlLambdaName", None)
    )
    if function_name is None:
        raise ValueError('One of JSON config file key "GenerateUrlLambdaName" or --function-name must be provided')
    if args.object_key is None and (args.record_id is None or args.scene_id is None):
        raise ValueError("You need to either specify --key or --record and --scene")

    client = boto3.client("lambda")
    print(f"Invoking: {function_name}")
    payload = {
        "bucket": bucket_name,
        "key": args.object_key,
        "record_id": args.record_id,
        "scene_id": args.scene_id,
    }
    print("payload: " + json.dumps(payload))
    response = client.invoke(
        FunctionName=str(function_name),
        InvocationType="RequestResponse",
        LogType="Tail",
        Payload=json.dumps(payload),
    )
    res = json.loads(response["Payload"].read())
    status_code = int(res.get("statusCode"))
    body = json.loads(res.get("body"))
    print(str(status_code))
    if status_code == 200:
        # Success: print just the presigned URL for easy shell capture.
        url = body.get("url")
        print(url)
    else:
        # Failure: dump the Lambda's error body.
        print(json.dumps(body))


if __name__ == "__main__":
    main()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance/tests/test_stack.py | modules/visualization/dev-instance/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance/tests/__init__.py | modules/visualization/dev-instance/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance-foxbox/stack.py | modules/visualization/dev-instance-foxbox/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
"""Stack for DataServiceDevInstances"""
import json
import os
from typing import cast
import aws_cdk.aws_secretsmanager as secretsmanager
from aws_cdk import Environment, SecretValue, Stack, Tags, aws_iam
from aws_cdk.aws_ec2 import (
BlockDevice,
BlockDeviceVolume,
EbsDeviceVolumeType,
Instance,
InstanceType,
MachineImage,
OperatingSystemType,
Peer,
Port,
SecurityGroup,
SubnetSelection,
SubnetType,
UserData,
Vpc,
)
from constructs import Construct, IConstruct
SSM_PARAMETER_MAP: dict = {
"focal": "/aws/service/canonical/ubuntu/server/focal/stable/current/amd64/hvm/ebs-gp2/ami-id",
"jammy": "/aws/service/canonical/ubuntu/server/jammy/stable/current/arm64/hvm/ebs-gp2/ami-id",
"noble": "/aws/service/canonical/ubuntu/server/noble/stable/current/amd64/hvm/ebs-gp3/ami-id",
}
STACK_DESCRIPTION = "(SO9154) Autonomous Driving Data Framework (ADDF) - dev-instance-foxbox"
class DataServiceDevInstancesStack(Stack):
    """Dev Instance Class: NICE-DCV-capable EC2 instances with managed Ubuntu passwords."""

    # Lazily cached user-data template (read from user-data/script.sh).
    _user_data: str = None

    # pylint: disable=R0913,R0914
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        env: Environment,
        project_name: str,
        deployment_name: str,
        module_name: str,
        vpc_id: str,
        instance_count: int = 1,
        ami_id: str = None,
        instance_type: str = "g4dn.xlarge",
        ebs_volume_size: int = 200,
        ebs_encrypt: bool = True,
        ebs_delete_on_termination: bool = True,
        ebs_volume_type: EbsDeviceVolumeType = EbsDeviceVolumeType.GP3,
        demo_password: str = None,
        s3_bucket_dataset: str = None,
        s3_bucket_scripts: str = None,
        **kwargs,
    ) -> None:
        """Constructor"""
        super().__init__(scope, stack_id, description=STACK_DESCRIPTION, env=env, **kwargs)

        ###################
        # Initial Variables
        partition = Stack.of(self).partition
        account = Stack.of(self).account
        region = Stack.of(self).region
        prefix = stack_id

        ###################
        # Tags
        Tags.of(scope=cast(IConstruct, self)).add(key="DeploymentName", value=f"{project_name}-{deployment_name}")

        ###################
        # AMI Selection
        # Bug fix: a release alias ("focal"/"jammy"/"noble") previously fell through
        # to MachineImage.lookup(name=alias), which searches for AMIs *named* after
        # the alias instead of resolving its SSM parameter, so the jammy/noble map
        # entries were unreachable. Resolve known aliases via SSM first; treat any
        # other non-None value as an AMI name to look up; default to focal.
        if ami_id in SSM_PARAMETER_MAP:
            ami = MachineImage.from_ssm_parameter(SSM_PARAMETER_MAP[ami_id], os=OperatingSystemType.LINUX)
        elif ami_id is not None:
            ami = MachineImage.lookup(name=ami_id)
        else:
            ami = MachineImage.from_ssm_parameter(SSM_PARAMETER_MAP["focal"], os=OperatingSystemType.LINUX)

        ###################
        # VPC
        vpc = Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

        ###################
        # Security Groups
        instance_sg = SecurityGroup(
            self,
            id="instance-sg",
            security_group_name=f"{prefix}-sg",
            allow_all_outbound=True,
            vpc=vpc,
        )
        instance_sg.add_ingress_rule(
            Peer.any_ipv4(),
            Port.tcp(8443),
            "Allow 8443 access everywhere (NiceDCV)",
        )

        ###################
        # IAM Roles
        dev_instance_role = aws_iam.Role(
            self,
            "dev-instance-role",
            assumed_by=aws_iam.CompositePrincipal(
                aws_iam.ServicePrincipal("ec2.amazonaws.com"),
                aws_iam.ServicePrincipal("ssm.amazonaws.com"),
            ),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedEC2InstanceDefaultPolicy"),
            ],
        )

        custom_policies = {}
        # Read this stack's own CloudFormation description.
        custom_policies["cloudformation"] = {
            "sid": "AllowCloudformation",
            "Effect": "Allow",
            "Action": ["cloudformation:DescribeStacks"],
            "Resource": [Stack.of(self).stack_id],
        }
        # DCV license check reads from the AWS-managed regional license bucket.
        custom_policies["dcv_license"] = {
            "sid": "AllowDCVLicense",
            "Effect": "Allow",
            "Action": ["s3:GetObject"],
            "Resource": [f"arn:{partition}:s3:::dcv-license.{region}/*"],
        }
        # Read/write access to the project's S3 buckets.
        custom_policies["s3"] = {
            "sid": "AllowADDFS3Buckets",
            "Effect": "Allow",
            "Action": ["s3:Get*", "s3:List*", "s3:PutObject*", "s3:DeleteObject*"],
            "Resource": [f"arn:{partition}:s3:::{project_name}*", f"arn:{partition}:s3:::{project_name}*/*"],
        }
        # Invoke the project's Lambda functions.
        custom_policies["lambda"] = {
            "sid": "AllowADDFLambdas",
            "Effect": "Allow",
            "Action": ["lambda:Invoke*"],
            "Resource": [f"arn:{partition}:lambda:{region}:{account}:function:{project_name}-*"],
        }
        if s3_bucket_dataset:
            # Optional read-only access to an external dataset bucket.
            custom_policies["s3_dataset"] = {
                "sid": "AllowDatasetsS3Buckets",
                "Effect": "Allow",
                "Action": ["s3:Get*", "s3:List*"],
                "Resource": [
                    f"arn:{partition}:s3:::{s3_bucket_dataset}",
                    f"arn:{partition}:s3:::{s3_bucket_dataset}/*",
                ],
            }

        # Add the policies to the role
        # pylint: disable=W0612
        for policy_key, policy in custom_policies.items():
            dev_instance_role.add_to_principal_policy(aws_iam.PolicyStatement.from_json(policy))

        i_output = {}
        ###################
        # Instances Iterator
        for idx in range(0, instance_count):
            instance_name = f"{module_name}-{idx}"

            ###################
            # Secret Manager (Ubuntu Password)
            if not demo_password:
                secret_password = secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({"username": "ubuntu"}),
                    generate_string_key="password",
                    exclude_punctuation=True,
                    include_space=False,
                    exclude_characters=r"\"',. |<>=/\\#;@[]{}~:`\$",
                    password_length=24,
                )
            else:
                secret_password = {
                    "username": SecretValue.unsafe_plain_text("ubuntu"),
                    "password": SecretValue.unsafe_plain_text(demo_password),
                }
            secret = secretsmanager.Secret(
                self,
                f"{prefix}-Secret-{idx}",
                description=f"Ubuntu password for {instance_name}",
                secret_name=f"{prefix}-{idx}-ubuntu-password",
                secret_object_value=secret_password if demo_password else None,
                generate_secret_string=secret_password if not demo_password else None,
            )
            secret.grant_read(dev_instance_role)

            ###################
            # User Data: render this instance's script from the cached template.
            user_data_script = self.get_user_data()
            user_data_script = user_data_script.replace(
                'SERVICE_USER_SECRET_NAME="PLACEHOLDER_SECRET"',
                f'SERVICE_USER_SECRET_NAME="{secret.secret_name}"',
            )
            if s3_bucket_scripts:
                user_data_script = user_data_script.replace(
                    'S3_BUCKET_NAME="PLACEHOLDER_S3_BUCKET_NAME"',
                    f'S3_BUCKET_NAME="{s3_bucket_scripts}"',
                )
                user_data_script = user_data_script.replace(
                    'SCRIPTS_PATH="PLACEHOLDER_SCRIPTS_PATH"',
                    f'SCRIPTS_PATH="{deployment_name}-{module_name}/scripts/"',
                )

            ###################
            # Instance resource
            instance = Instance(
                self,
                id=instance_name,
                machine_image=ami,
                instance_type=InstanceType(instance_type_identifier=instance_type),
                block_devices=[
                    BlockDevice(
                        device_name="/dev/sda1",
                        volume=BlockDeviceVolume.ebs(
                            encrypted=ebs_encrypt,
                            delete_on_termination=ebs_delete_on_termination,
                            volume_size=ebs_volume_size,
                            volume_type=ebs_volume_type,
                        ),
                    )
                ],
                vpc=vpc,
                user_data=UserData.custom(user_data_script),
                role=dev_instance_role,
                vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC),
                security_group=instance_sg,
                require_imdsv2=True,
            )
            # Kept for backward compatibility: holds the LAST iteration's instance.
            self.instance = instance

            i_output[instance_name] = {
                "DevInstanceURL": f"https://{instance.instance_public_dns_name}:8443",
                "AWSSecretName": secret.secret_name,
            }

        # Per-instance connection info, surfaced as a CloudFormation output by app.py.
        self.output_instances = i_output

    def get_user_data(self):
        """Return the user-data template, reading user-data/script.sh on first use."""
        if not self._user_data:
            with open(os.path.join("user-data", "script.sh"), "r", encoding="utf-8") as f:
                self._user_data = f.read()
        return self._user_data
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance-foxbox/app.py | modules/visualization/dev-instance-foxbox/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
"""Seedfarmer Module for Data Service Dev Instances"""
import json
import os
from aws_cdk import App, CfnOutput, Environment
from stack import DataServiceDevInstancesStack
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
###################
# General Environment Variables
# Seed-farmer project context (None when run outside seed-farmer).
PROJECT_NAME = os.getenv("SEEDFARMER_PROJECT_NAME", None)
DEPLOYMENT_NAME = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", None)
MODULE_NAME = os.getenv("SEEDFARMER_MODULE_NAME", None)

# Module parameters; only VPC_ID has no usable default and is effectively required.
VPC_ID = os.getenv(_param("VPC_ID"), None)
INSTANCE_TYPE = os.getenv(_param("INSTANCE_TYPE"), "g4dn.xlarge")
INSTANCE_COUNT = int(os.getenv(_param("INSTANCE_COUNT"), "1"))
AMI_ID = os.getenv(_param("AMI_ID"), None)
S3_BUCKET_DATASET = os.getenv(_param("S3_BUCKET_DATASET"), None)
S3_BUCKET_SCRIPTS = os.getenv(_param("S3_BUCKET_SCRIPTS"), None)
DEMO_PASSWORD = os.getenv(_param("DEMO_PASSWORD"), None)

# Fall back to a generic stack id when not driven by seed-farmer.
STACK_ID = "data-src-dev-instances"
if DEPLOYMENT_NAME and MODULE_NAME:
    STACK_ID = f"{PROJECT_NAME}-{DEPLOYMENT_NAME}-{MODULE_NAME}"

# CDK_DEPLOY_* override the default CDK environment when present.
ENV = Environment(
    account=os.environ.get("CDK_DEPLOY_ACCOUNT", os.environ["CDK_DEFAULT_ACCOUNT"]),
    region=os.environ.get("CDK_DEPLOY_REGION", os.environ["CDK_DEFAULT_REGION"]),
)

###################
# Stack
app = App()

stack = DataServiceDevInstancesStack(
    app,
    STACK_ID,
    env=ENV,
    project_name=PROJECT_NAME,
    deployment_name=DEPLOYMENT_NAME,
    module_name=MODULE_NAME,
    vpc_id=VPC_ID,
    instance_count=INSTANCE_COUNT,
    instance_type=INSTANCE_TYPE,
    ami_id=AMI_ID,
    demo_password=DEMO_PASSWORD,
    s3_bucket_dataset=S3_BUCKET_DATASET,
    s3_bucket_scripts=S3_BUCKET_SCRIPTS,
)

# Surface per-instance connection info (URL + secret name) as a stack output.
CfnOutput(scope=stack, id="metadata", value=json.dumps(stack.output_instances))

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance-foxbox/scripts/get_url.py | modules/visualization/dev-instance-foxbox/scripts/get_url.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# type: ignore
"""Script to Request Pre-signed URL from the generateUrlLambda"""
import json
import logging
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import boto3
LOGGING_FORMAT = "%(asctime)s\t%(levelname)s\t%(message)s"
LOGGING_DATEFMT = "%Y-%m-%d %H:%M:%S"
LOG_LEVEL = logging.INFO
LOGGER = {}
CONFIG_FILE_OPT = "--config-file"
SYSARGV = " ".join(sys.argv)
logging.basicConfig(format=LOGGING_FORMAT, level=LOG_LEVEL, datefmt=LOGGING_DATEFMT)
METADATA = {}
######################
# Argparser Overrides
class ScriptParser(ArgumentParser):
    """ArgumentParser subclass that supplies default description/usage text
    and prints full help (not just the one-line error) on argument errors."""

    def __init__(self, *args, **kwargs):
        """Constructor Override: inject default description/usage unless the
        caller provided their own."""
        kwargs["description"] = kwargs.get("description", self.get_description())
        kwargs["usage"] = kwargs.get("usage", self.get_usage())
        super().__init__(*args, **kwargs)

    def get_description(self):
        """Override main description method: document the JSON config schema."""
        return "\n".join(
            (
                "The format of the config file is:",
                " {",
                '   "BucketName":"s3-bucket-name",',
                '   "FunctionName":"lambda-to-invoke",',
                '   "Key":"path/to/rosbag/file",',
                '   "RecordID":"record_id from DynamoDB (Partition Key)",',
                '   "SceneID":"scene_id from DynamoDB (Sort Key)"',
                " }",
            )
        )

    def get_usage(self):
        """Override main usage method: show the supported invocation styles,
        including reading the config from STDIN via '-'."""
        return "\n".join(
            (
                "",
                "  %(prog)s --config-file config.json",
                "  %(prog)s --bucket-name s3-bucket-name --function-name lambda-to-invoke"
                " --key path/to/rosbag/file --record record_id --scene scene_id",
                "  cat config.json | %(prog)s --config-file -",
                '  echo "{...}" | %(prog)s --config-file -',
            )
        )

    def error(self, message: str):
        """Override main error method: log the error, print full help to
        stderr, and exit with status 2 (argparse's conventional error code)."""
        logging.error(f"{message}\n")
        self.print_help(sys.stderr)
        sys.exit(2)
######################
# Main Execution
def main(metadata: dict):
    """Invoke the generate-URL Lambda and print the presigned URL (or error body)."""
    lambda_client = boto3.client("lambda")
    logging.info(f"Invoking: {metadata['FunctionName']}")
    payload = {
        "bucket": metadata["BucketName"],
        "key": metadata["Key"],
        "record_id": metadata["RecordID"],
        "scene_id": metadata["SceneID"],
    }
    logging.info(f"Payload: {json.dumps(payload)}")
    invocation = lambda_client.invoke(
        FunctionName=str(metadata["FunctionName"]),
        InvocationType="RequestResponse",
        LogType="Tail",
        Payload=json.dumps(payload),
    )
    result = json.loads(invocation["Payload"].read())
    status_code = int(result.get("statusCode"))
    body = json.loads(result.get("body"))
    logging.info(f"Response Status Code: {str(status_code)}")
    # On success print only the URL (easy to capture in a shell); otherwise
    # dump the Lambda's error body.
    print(body.get("url") if status_code == 200 else json.dumps(body))
if __name__ == "__main__":
    ######################
    # Parse Arguments
    parser = ScriptParser(formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        CONFIG_FILE_OPT,
        dest="config_file",
        required=False,
        help='Name of the JSON file with Module\'s Metadata, use "-" to read from STDIN',
    )
    parser.add_argument(
        "--bucket-name",
        dest="bucket_name",
        required=(CONFIG_FILE_OPT not in SYSARGV),
        help="The name of the bucket containing rosbag file, required if no --config-file is provided",
    )
    parser.add_argument(
        "--function-name",
        dest="function_name",
        required=(CONFIG_FILE_OPT not in SYSARGV),
        help="The function to invoke, required if no --config-file is provided",
    )
    parser.add_argument(
        "--key",
        dest="object_key",
        required=(CONFIG_FILE_OPT not in SYSARGV),
        help="The key of the object in S3",
    )
    parser.add_argument(
        "--record",
        dest="record_id",
        required=False,
        help="The partition key of the scene in the scenario DynamoDB",
    )
    parser.add_argument(
        "--scene",
        dest="scene_id",
        required=False,
        help="The sort key of the scene in the scenario DynamoDB",
    )
    parser.add_argument("--debug", action="store_true", required=False, help="Enable Debug messages")
    arguments = parser.parse_args()

    ######################
    # Modify Logging level
    logging.getLogger().setLevel(logging.DEBUG if arguments.debug else LOG_LEVEL)

    ######################
    # Verify config file
    try:
        config_keys = ("BucketName", "FunctionName", "Key")
        if arguments.config_file is not None:
            logging.debug(f"Loading config file {arguments.config_file}")
            if arguments.config_file == "-":
                METADATA = json.load(sys.stdin)
            else:
                with open(arguments.config_file, encoding="UTF-8") as config_file:
                    METADATA = json.load(config_file)
            logging.debug(f"Config file loaded: {METADATA}")
            if not all(c_key in METADATA for c_key in config_keys):
                raise ValueError(f"Config file missing parameters: {config_keys}")
    except (ValueError, FileNotFoundError) as exc:
        logging.error(f"Error loading config file {arguments.config_file}: {exc}")
        sys.exit(1)

    ######################
    # Verify parameters
    try:
        if arguments.config_file is None:
            params_keys = ("bucket_name", "function_name", "object_key")
            if not all((p_key in arguments and getattr(arguments, p_key) != "") for p_key in params_keys):
                raise ValueError(f"Missing or empty parameters: {params_keys}")
            # Bug fix: the old check raised whenever EITHER --record or --scene
            # was supplied, making the flags unusable. They are a pair: raise
            # only when exactly one of the two is set.
            record_given = getattr(arguments, "record_id") not in (None, "")
            scene_given = getattr(arguments, "scene_id") not in (None, "")
            if record_given != scene_given:
                raise ValueError("Optional parameters should be set if any of both are declared: (record_id, scene_id)")
            METADATA = {
                "BucketName": getattr(arguments, "bucket_name"),
                "FunctionName": getattr(arguments, "function_name"),
                "Key": getattr(arguments, "object_key"),
                "RecordID": getattr(arguments, "record_id"),
                "SceneID": getattr(arguments, "scene_id"),
            }
    except (ValueError, FileNotFoundError) as exc:
        logging.error(f"Provided parameters: {arguments}")
        logging.error(f"{exc}")
        sys.exit(1)

    ######################
    # Execute
    main(metadata=METADATA)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance-foxbox/tests/test_stack.py | modules/visualization/dev-instance-foxbox/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dev-instance-foxbox/tests/__init__.py | modules/visualization/dev-instance-foxbox/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/stack.py | modules/visualization/dcv-image/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import Any, cast
from aws_cdk import Stack, Tags
from aws_cdk import aws_ecr as ecr
from aws_cdk.aws_ecr_assets import DockerImageAsset
from cdk_ecr_deployment import DockerImageName, ECRDeployment
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
class DcvImagePublishingStack(Stack):
    """CDK stack that builds the DCV Docker image and publishes it to an existing ECR repo.

    The pushed image URI (tag ``dcv-latest``) is exposed as ``self.image_uri`` so the
    app can emit it as SeedFarmer module metadata.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        repository_name: str,
        deployment_name: str,
        module_name: str,
        dcv_sm_name: str,
        **kwargs: Any,
    ) -> None:
        """Create the stack.

        Parameters
        ----------
        scope, id
            Standard CDK construct arguments.
        project_name, deployment_name, module_name
            SeedFarmer identifiers; combined into the ``Deployment`` tag.
        repository_name
            Name of the pre-existing ECR repository to push into.
        dcv_sm_name
            Secret name passed to the Docker build as the DCV_SM_NAME build arg.
        """
        super().__init__(scope, id, **kwargs)
        self.project_name = project_name
        self.deployment_name = deployment_name
        self.module_name = module_name
        dep_mod = f"{self.project_name}-{self.deployment_name}-{self.module_name}"
        # Truncate defensively so the tag value stays within limits.
        dep_mod = dep_mod[:64]
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=dep_mod)
        repo = ecr.Repository.from_repository_name(self, id=dep_mod + repository_name, repository_name=repository_name)
        # Build the DCV image from ./src at synth time.
        local_image = DockerImageAsset(
            self,
            "ImageExtractionDockerImage",
            directory=os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"),
            build_args={"DCV_SM_NAME": dcv_sm_name},
        )
        self.image_uri = f"{repo.repository_uri}:dcv-latest"
        # Copy the CDK asset image into the target repository under the dcv-latest tag.
        ECRDeployment(
            self,
            "ImageURI",
            src=DockerImageName(local_image.image_uri),
            dest=DockerImageName(self.image_uri),
        )
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for src account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        # Typo fix: "restriced" -> "restricted".
                        "reason": "Resource access restricted to resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/app.py | modules/visualization/dcv-image/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
from aws_cdk import App, CfnOutput, Environment
from stack import DcvImagePublishingStack
# Project specific
# SeedFarmer injects these identifiers into the environment at deploy time.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME")
module_name = os.getenv("SEEDFARMER_MODULE_NAME")
# Derived resource names embed project+deployment; enforce the length limit early.
if len(f"{project_name}-{deployment_name}") > 36:
    raise ValueError("This module cannot support a project+deployment name character length greater than 35")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters: target ECR repo and the Secrets Manager secret name for DCV.
ecr_repo_name = os.getenv(_param("DCV_ECR_REPOSITORY_NAME"), "")
dcv_sm_name = os.getenv(_param("DCV_SM_NAME"), "dcv-credentials")
app = App()
dcv_image_pushing_stack = DcvImagePublishingStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=cast(str, project_name),
    deployment_name=cast(str, deployment_name),
    repository_name=ecr_repo_name,
    module_name=cast(str, module_name),
    dcv_sm_name=dcv_sm_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)
# SeedFarmer reads the "metadata" output to learn the pushed image URI.
CfnOutput(
    scope=dcv_image_pushing_stack,
    id="metadata",
    value=dcv_image_pushing_stack.to_json_string(
        {
            "DCVImageUri": dcv_image_pushing_stack.image_uri,
        }
    ),
)
app.synth()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/src/update_parameters.py | modules/visualization/dcv-image/src/update_parameters.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import logging
import os
import subprocess
import sys
import typing
import boto3
import kubernetes
# Mounted service-account file holding the pod's own namespace.
NAMESPACE_FILE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
# Env var naming the ConfigMap that receives the display number.
ADDF_CONFIG_MAP_ENV_VAR_NAME = "DCV_EKS_CONFIG_MAP_NAME"
# Key in `dcv describe-session --json` output that carries the X11 display.
ADDF_DCV_SESSION_INFO_DISPLAY_KEY = "x11-display"
# ConfigMap data key the display is written under.
ADDF_DCV_DISPLAY_KEY_SUFFIX = "display"
# Top-level section patched in the ConfigMap.
ADDF_CONFIG_MAP_PATCH_KEY = "data"
# Name of the DCV session queried for the display.
ADDF_DCV_SESSION_NAME = "default-session"
# Env var naming the SSM parameter that also receives the display number.
ADDF_SSM_PARAMETER_STORE_ENV_NAME = "DCV_EKS_DISPLAY_PARAMETER_STORE"
def get_display_number() -> str:
    """Return the X11 display reported by the DCV default session.

    Shells out to ``dcv describe-session --json`` and extracts the
    ``x11-display`` field from the parsed response, e.g. ``:0``.
    """
    raw_output = subprocess.check_output(
        ["dcv", "describe-session", ADDF_DCV_SESSION_NAME, "--json"]
    ).decode("utf-8")
    info = json.loads(raw_output)
    logging.info(f"Processing session {ADDF_DCV_SESSION_NAME} with info: {info}")
    return str(info[ADDF_DCV_SESSION_INFO_DISPLAY_KEY])
def verify_config_map(
    client_v1: kubernetes.client.api.core_v1_api.CoreV1Api,
    namespace: str,
    config_map_name: str,
) -> bool:
    """Return True when *config_map_name* exists in *namespace*.

    Lists all ConfigMaps in the namespace and checks for a name match;
    logs an error and returns False when the ConfigMap is absent.
    """
    existing = client_v1.list_namespaced_config_map(namespace)
    if any(cm.metadata.name == config_map_name for cm in existing.items):
        return True
    logging.error(f"Unable to find ConfigMap {config_map_name}")
    return False
def update_config_map(
    client_v1: kubernetes.client.api.core_v1_api.CoreV1Api,
    namespace: str,
    config_map_name: str,
    display: str,
) -> bool:
    """
    Update the ConfigMap with the display number.

    Parameters
    ----------
    client_v1: kubernetes.client.api.core_v1_api.CoreV1Api
        Kubernetes client
    namespace: str
        The namespace which the ConfigMap is patched in
    config_map_name: str
        The name of the ConfigMap
    display: str
        The display number created by dcv default-session

    Returns
    -------
    bool
        True when the patch call returned a ConfigMap object, False otherwise.

    Examples
    --------
    >>> update_config_map(client, "dcv", "config-map", ":0")
    True
    """
    # Patch only the "data" section, setting the display key.
    # (The original wrapped the constant in a pointless f-string.)
    body: typing.Dict[str, typing.Dict[str, str]] = {
        ADDF_CONFIG_MAP_PATCH_KEY: {ADDF_DCV_DISPLAY_KEY_SUFFIX: display}
    }
    config_map_patch_result: typing.Any = client_v1.patch_namespaced_config_map(config_map_name, namespace, body)
    if config_map_patch_result.kind == "ConfigMap":
        return True
    logging.error(f"Unable to update ConfigMap {config_map_name}")
    return False
def update_parameter_store(display: str) -> bool:
    """
    Persist the display number to SSM Parameter Store.

    Parameters
    ----------
    display: str
        The display number created by dcv default-session

    Returns
    -------
    bool
        The result of the update. True for a successful update of the SSM
        parameter and vice versa.

    Examples
    --------
    >>> update_parameter_store(":0")
    True
    """
    # NOTE: a stale second docstring (copied from verify_config_map) used to sit
    # here as a dead bare-string statement; it has been removed.
    client = boto3.client("ssm", region_name=os.getenv("AWS_REGION"))
    try:
        # Overwrite so repeated container restarts refresh the stored value.
        response = client.put_parameter(
            Name=os.getenv(ADDF_SSM_PARAMETER_STORE_ENV_NAME),
            Value=display,
            Type="String",
            Overwrite=True,
        )
        logging.info(response)
        return True
    except Exception as e:
        # Best-effort: log and report failure to the caller rather than crash.
        logging.error(e)
        return False
def main() -> None:
    """Publish the DCV session's X11 display to the EKS ConfigMap and SSM.

    Reads the pod namespace from the mounted service-account file, then exits
    with status 1 if the ConfigMap name is unset/missing or any update fails.
    """
    # In-cluster auth: this script runs inside the DCV pod.
    kubernetes.config.load_incluster_config()
    client_v1 = kubernetes.client.CoreV1Api()
    display = get_display_number()
    with open(NAMESPACE_FILE_PATH, "r") as f:
        namespace = f.read()
    logging.info(f"Looking for resource in namespace {namespace}")
    config_map_name = os.getenv(ADDF_CONFIG_MAP_ENV_VAR_NAME, "")
    if config_map_name == "":
        print("ConfigMap name empty")
        sys.exit(1)
    # Each step exits immediately on failure so the container restarts and retries.
    if not verify_config_map(client_v1, namespace, config_map_name):
        sys.exit(1)
    if not update_config_map(client_v1, namespace, config_map_name, display):
        sys.exit(1)
    if not update_parameter_store(display):
        sys.exit(1)
# Run only when executed directly (not on import).
if __name__ == "__main__":
    main()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/tests/test_app.py | modules/visualization/dcv-image/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    # Baseline SeedFarmer/CDK environment expected by app.py at import time.
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Drop any cached import so each test re-executes app.py's module body.
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    # Smoke test: importing app must synthesize without raising.
    import app  # noqa: F401
def test_project_deployment_name_length(stack_defaults):
    """Importing app must fail when project+deployment exceeds the length limit."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project-incredibly"
    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    # Assert on the raised exception's message, not the ExceptionInfo wrapper.
    assert "module cannot support a project+deployment name character length greater than" in str(e.value)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/tests/test_stack.py | modules/visualization/dcv-image/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    # CDK environment required to construct the stack in tests.
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Unload the app import so that subsequent tests don't reuse
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_app(stack_defaults):
    """Synthesize the DCV image publishing stack and verify its deployment resource."""
    import stack
    app = cdk.App()
    dep_name = "test-deployment"
    mod_name = "test-project"
    # Bind the result to its own name: the original rebound `stack`,
    # shadowing the imported module.
    dcv_stack = stack.DcvImagePublishingStack(
        scope=app,
        id="addf-dcv-image-test-module",
        project_name=mod_name,
        repository_name="test-repo",
        deployment_name=dep_name,
        module_name=mod_name,
        dcv_sm_name="test-dcv-session-manager",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(dcv_stack)
    template.resource_count_is("Custom::CDKBucketDeployment", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/visualization/dcv-image/tests/__init__.py | modules/visualization/dcv-image/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/stack.py | modules/sensor-extraction/ros-to-parquet/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from typing import Any, Union, cast
import aws_cdk.aws_batch as batch
import aws_cdk.aws_ecr as ecr
import aws_cdk.aws_ecs as ecs
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Duration, Size, Stack, Tags
from aws_cdk.aws_ecr_assets import DockerImageAsset
from cdk_ecr_deployment import DockerImageName, ECRDeployment
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class RosToParquetBatchJob(Stack):
    """CDK stack that builds/pushes the ros-to-parquet image and defines the Batch job.

    Exposes ``self.batch_job`` (an ``EcsJobDefinition``) so the app can publish
    its ARN as module metadata.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        ecr_repository_arn: str,
        s3_access_policy: str,
        platform: str,  # FARGATE or EC2
        retries: int,
        timeout_seconds: int,
        vcpus: int,
        memory_limit_mib: int,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Build the image, the IAM role, and the Batch job definition.

        Parameters mirror the module's SeedFarmer inputs; ``platform`` selects
        between a Fargate and an EC2 container definition (all other container
        settings are identical between the two).
        """
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment",
            value="aws",
        )
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        repo = ecr.Repository.from_repository_arn(self, "Repository", ecr_repository_arn)
        # Build the extraction image from ./src and copy it into the repo as :latest.
        local_image = DockerImageAsset(
            self,
            "RosToParquet",
            directory=os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"),
        )
        image_uri = f"{repo.repository_uri}:latest"
        ECRDeployment(
            self,
            "RosToParquetURI",
            src=DockerImageName(local_image.image_uri),
            dest=DockerImageName(image_uri),
        )
        # Job/execution role: project DynamoDB tables, module ECR repos, project buckets.
        policy_statements = [
            iam.PolicyStatement(
                actions=["dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:dynamodb:{self.region}:{self.account}:table/{project_name}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject", "s3:GetObjectAcl", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{project_name}-*",
                    f"arn:{self.partition}:s3:::{project_name}-*/*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)
        role = iam.Role(
            self,
            f"{repo.repository_name}-batch-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
                iam.ManagedPolicy.from_managed_policy_arn(self, id="fullaccess", managed_policy_arn=s3_access_policy),
            ],
            max_session_duration=Duration.hours(12),
        )
        # The FARGATE and EC2 container definitions take identical settings;
        # build the kwargs once instead of duplicating the two branches.
        container_kwargs = dict(
            image=ecs.ContainerImage.from_ecr_repository(repo, "latest"),
            command=["bash", "entrypoint.sh"],
            environment={
                "AWS_DEFAULT_REGION": self.region,
                "AWS_ACCOUNT_ID": self.account,
                "DEBUG": "true",
            },
            job_role=role,
            execution_role=role,
            memory=Size.mebibytes(memory_limit_mib),
            cpu=vcpus,
        )
        container_def: Union[batch.EcsFargateContainerDefinition, batch.EcsEc2ContainerDefinition]
        if platform == "FARGATE":
            container_def = batch.EcsFargateContainerDefinition(self, "batch-job-container-def", **container_kwargs)
        else:
            container_def = batch.EcsEc2ContainerDefinition(self, "batch-job-container-def", **container_kwargs)
        self.batch_job = batch.EcsJobDefinition(
            self,
            "batch-job-def-from-ecr",
            container=container_def,
            job_definition_name=repo.repository_name,
            retry_attempts=retries,
            timeout=Duration.seconds(timeout_seconds),
        )
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        # Typo fix: "restriced" -> "restricted".
                        "reason": "Resource access restricted to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/app.py | modules/sensor-extraction/ros-to-parquet/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import RosToParquetBatchJob
# SeedFarmer identifiers injected into the environment at deploy time.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters with their defaults; numeric ones are coerced from strings.
ecr_repository_arn = os.getenv(_param("ECR_REPOSITORY_ARN"))
full_access_policy = os.getenv(_param("FULL_ACCESS_POLICY_ARN"))
platform = os.getenv(_param("PLATFORM"), "FARGATE")
retries = int(os.getenv(_param("RETRIES"), 1))
timeout_seconds = int(os.getenv(_param("TIMEOUT_SECONDS"), 60))
vcpus = int(os.getenv(_param("VCPUS"), 4))
memory_limit_mib = int(os.getenv(_param("MEMORY_MIB"), 16384))
# Fail fast on missing/invalid required inputs before any CDK work.
if not ecr_repository_arn:
    raise ValueError("ECR Repository ARN is missing.")
if not full_access_policy:
    raise ValueError("S3 Full Access Policy ARN is missing.")
if platform not in ["FARGATE", "EC2"]:
    raise ValueError("Platform must be either FARGATE or EC2")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Uses the optional SOLUTION_ID/NAME/VERSION SeedFarmer parameters when
    present; otherwise falls back to the default ADDF description.
    """
    sol_id = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    sol_name = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    sol_version = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    if sol_id and sol_name:
        if sol_version:
            return f"({sol_id}) {sol_name}. Version {sol_version}"
        return f"({sol_id}) {sol_name}"
    return "(SO9154) Autonomous Driving Data Framework (ADDF) - ros-to-parquet"
app = App()
stack = RosToParquetBatchJob(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    ecr_repository_arn=ecr_repository_arn,
    platform=platform,
    retries=retries,
    timeout_seconds=timeout_seconds,
    vcpus=vcpus,
    memory_limit_mib=memory_limit_mib,
    s3_access_policy=full_access_policy,
    stack_description=generate_description(),
)
# SeedFarmer reads the "metadata" output to learn the job definition ARN.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "JobDefinitionArn": stack.batch_job.job_definition_arn,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/src/main.py | modules/sensor-extraction/ros-to-parquet/src/main.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import logging
import os
import sys
import zipfile
import boto3
import fastparquet
import pandas as pd
import requests
from rclpy.serialization import deserialize_message
from rosbag2_py import ConverterOptions, SequentialReader, StorageOptions
from rosidl_runtime_py.utilities import get_message
# Verbose format used when DEBUG is enabled via the environment.
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d][%(levelname)s][%(threadName)s] %(message)s"
# Truthy values accepted for the DEBUG env var (case-insensitive).
debug = os.environ.get("DEBUG", "False").lower() in [
    "true",
    "yes",
    "1",
]
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=level, format=DEBUG_LOGGING_FORMAT)
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(level)
if debug:
    # Keep AWS SDK/transport chatter out of debug output.
    logging.getLogger("boto3").setLevel(logging.ERROR)
    logging.getLogger("botocore").setLevel(logging.ERROR)
    logging.getLogger("urllib3").setLevel(logging.ERROR)
    logging.getLogger("s3transfer").setLevel(logging.CRITICAL)
class ParquetFromBag:
    """Extract one topic from a ROS2 (sqlite3) bag and write it to a local parquet file.

    After construction, ``self.file`` holds the local parquet path and the
    sanitized topic name for the upload step.
    """

    def __init__(self, ros2_path, topic, output_path, drive_id, file_id):
        # S3/filesystem-safe topic name, e.g. "/sensor/cam" -> "_sensor_cam".
        clean_topic = topic.replace("/", "_")
        output_dir = os.path.join(output_path, clean_topic)
        os.makedirs(output_dir, exist_ok=True)
        local_parquet_name = os.path.join(output_dir, "data.parquet")
        storage_options = StorageOptions(uri=ros2_path, storage_id="sqlite3")
        converter_options = ConverterOptions("", "")
        reader = SequentialReader()
        reader.open(storage_options, converter_options)
        topic_types = reader.get_all_topics_and_types()
        type_map = {entry.name: entry.type for entry in topic_types}
        # Collect all messages for this topic
        messages = []
        while reader.has_next():
            (topic_name, data, timestamp) = reader.read_next()
            if topic_name == topic:
                msg_type = get_message(type_map[topic_name])
                msg = deserialize_message(data, msg_type)
                # Flatten the message into a row, tagging provenance columns.
                msg_dict = self._message_to_dict(msg)
                msg_dict["timestamp"] = timestamp
                msg_dict["drive_id"] = drive_id
                msg_dict["file_id"] = file_id
                messages.append(msg_dict)
        reader.close()
        self.file = {
            "local_parquet_path": local_parquet_name,
            "topic": clean_topic,
        }
        if not messages:
            # Consistency fix: use the module logger rather than the root logger.
            logger.info("No data found for {topic}".format(topic=topic))
        else:
            logger.info("Reading data found for {topic}".format(topic=topic))
            df_out = pd.DataFrame(messages)
            # Parquet column names cannot contain dots from nested fields.
            df_out.columns = [x.replace(".", "_") for x in df_out.columns]
            fastparquet.write(local_parquet_name, df_out)

    def _message_to_dict(self, msg):
        """Convert a ROS2 message to a dict, recursing into nested messages."""
        result = {}
        for field_name in msg.get_fields_and_field_types():
            try:
                value = getattr(msg, field_name)
                # Nested ROS messages expose the same introspection hook; recurse.
                if hasattr(value, "get_fields_and_field_types"):
                    result[field_name] = self._message_to_dict(value)
                else:
                    result[field_name] = value
            except Exception:
                # Unreadable fields become explicit nulls rather than aborting the row.
                result[field_name] = None
        return result
def upload(client, bucket_name, drive_id, file_id, files):
    """Upload each topic's parquet file to S3 and return the distinct key prefixes.

    Each entry in *files* carries ``local_parquet_path`` and a sanitized
    ``topic``; objects land at ``<drive_id>/<file_id sans .zip>/<topic>/data.parquet``.
    """
    base_prefix = os.path.join(drive_id, file_id.replace(".zip", ""))
    seen_prefixes = set()
    for entry in files:
        prefix = os.path.join(base_prefix, entry["topic"])
        client.upload_file(entry["local_parquet_path"], bucket_name, os.path.join(prefix, "data.parquet"))
        seen_prefixes.add(prefix)
    return list(seen_prefixes)
def get_log_path():
    """Return (region, escaped log-stream) for this task from the ECS metadata endpoint.

    The stream's slashes are percent-double-encoded ($252F) for use in a
    CloudWatch console URL.
    """
    response = requests.get(f"{os.environ['ECS_CONTAINER_METADATA_URI_V4']}", timeout=10)
    log_options = response.json()["LogOptions"]
    return log_options["awslogs-region"], log_options["awslogs-stream"].replace("/", "$252F")
def save_job_url_and_logs(table, drive_id, file_id, batch_id, index):
    """Attach the Batch console URL and CloudWatch logs URL to both tracking items."""
    job_region, log_path = get_log_path()
    job_url = (
        f"https://{job_region}.console.aws.amazon.com/batch/home?region={job_region}#jobs/detail/"
        f"{os.environ['AWS_BATCH_JOB_ID']}"
    )
    job_cloudwatch_logs = (
        f"https://{job_region}.console.aws.amazon.com/cloudwatch/home?region={job_region}#"
        f"logsV2:log-groups/log-group/$252Faws$252Fbatch$252Fjob/log-events/{log_path}"
    )
    # The same links go on the per-file item and the batch/index item.
    for pk, sk in ((drive_id, file_id), (batch_id, index)):
        table.update_item(
            Key={"pk": pk, "sk": sk},
            UpdateExpression="SET "
            "parquet_extraction_batch_job = :batch_url, "
            "parquet_extraction_job_logs = :cloudwatch_logs",
            ExpressionAttributeValues={
                ":cloudwatch_logs": job_cloudwatch_logs,
                ":batch_url": job_url,
            },
        )
def main(table_name, index, batch_id, zip_path, local_output_path, topics, target_bucket) -> int:
    """Download one recording zip, extract the requested topics to parquet, upload to S3.

    Looks up the work item at (batch_id, index) in the DynamoDB table, downloads
    and unzips the bag from S3, converts each topic via ParquetFromBag, uploads
    the parquet files, and records success on both tracking items. Returns 0.
    Raises ValueError when the work item does not exist.
    """
    logger.info("batch_id: %s", batch_id)
    logger.info("index: %s", index)
    logger.info("table_name: %s", table_name)
    logger.info("zip_path: %s", zip_path)
    logger.info("local_output_path: %s", local_output_path)
    logger.info("topics: %s", topics)
    logger.info("target_bucket: %s", target_bucket)
    # Getting Item to Process
    dynamodb = boto3.resource("dynamodb")
    table = dynamodb.Table(table_name)
    item = table.get_item(
        Key={"pk": batch_id, "sk": index},
    ).get("Item", {})
    logger.info("Item Pulled: %s", item)
    if not item:
        raise ValueError(f"pk: {batch_id} sk: {index} not existing in table: {table_name}")
    drive_id = item["drive_id"]
    file_id = item["file_id"]
    s3 = boto3.client("s3")
    # Record console/log URLs early so failures are diagnosable from DynamoDB.
    save_job_url_and_logs(table, drive_id, file_id, batch_id, index)
    logger.info("Downloading zip file")
    s3.download_file(item["s3_bucket"], item["s3_key"], zip_path)
    logger.info(f"Zip downloaded to {zip_path}")
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(os.path.dirname(zip_path))
    logger.info(f"Extracted zip to {os.path.dirname(zip_path)}")
    # The bag directory inside the zip is named after the zip (without .zip).
    file_name = item["file_id"].replace(".zip", "")
    ros2_path = f"{os.path.dirname(zip_path)}/{file_name}"
    logger.info(f"ROS2 directory at {ros2_path}")
    all_files = []
    for topic in topics:
        logger.info(f"Getting data from topic: {topic}")
        ros2_obj = ParquetFromBag(
            topic=topic,
            ros2_path=ros2_path,
            output_path=local_output_path,
            drive_id=drive_id,
            file_id=file_id,
        )
        all_files.append(ros2_obj.file)
    # Sync results
    logger.info(f"Uploading results - {target_bucket}")
    uploaded_directories = upload(s3, target_bucket, drive_id, file_id, all_files)
    logger.info("Uploaded results")
    logger.info("Writing job status to DynamoDB")
    # Mark success on the per-file item and the batch/index item.
    table.update_item(
        Key={"pk": item["drive_id"], "sk": item["file_id"]},
        UpdateExpression="SET "
        "parquet_extraction_status = :parquet_status, "
        "raw_parquet_dirs = :raw_parquet_dirs, "
        "raw_parquet_bucket = :raw_parquet_bucket",
        ExpressionAttributeValues={
            ":parquet_status": "success",
            ":raw_parquet_dirs": uploaded_directories,
            ":raw_parquet_bucket": target_bucket,
        },
    )
    table.update_item(
        Key={"pk": batch_id, "sk": index},
        UpdateExpression="SET "
        "parquet_extraction_status = :parquet_status, "
        "raw_parquet_dirs = :raw_parquet_dirs, "
        "raw_parquet_bucket = :raw_parquet_bucket",
        ExpressionAttributeValues={
            ":parquet_status": "success",
            ":raw_parquet_dirs": uploaded_directories,
            ":raw_parquet_bucket": target_bucket,
        },
    )
    return 0
if __name__ == "__main__":
    # Arguments passed from DAG Code
    parser = argparse.ArgumentParser(description="Process Files")
    parser.add_argument("--tablename", required=True)
    parser.add_argument("--index", required=True)
    parser.add_argument("--batchid", required=True)
    parser.add_argument("--sensortopics", required=True)
    parser.add_argument("--targetbucket", required=True)
    args = parser.parse_args()
    logger.debug("ARGS: %s", args)
    # Fixed scratch paths inside the container; topics arrive as a JSON list.
    sys.exit(
        main(
            batch_id=args.batchid,
            index=args.index,
            table_name=args.tablename,
            zip_path="/tmp/ros.zip",
            local_output_path="/tmp/output",
            topics=json.loads(args.sensortopics),
            target_bucket=args.targetbucket,
        )
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/tests/test_app.py | modules/sensor-extraction/ros-to-parquet/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    # Full SeedFarmer/CDK environment required by app.py at import time.
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"] = (
        "arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository"
    )
    os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"] = (
        "arn:aws:iam::123456789012:policy/addf-aws-solutions-wip-policy-full-access"
    )
    os.environ["SEEDFARMER_PARAMETER_MEMORY_MIB"] = "8192"
    os.environ["SEEDFARMER_PARAMETER_PLATFORM"] = "FARGATE"
    os.environ["SEEDFARMER_PARAMETER_RETRIES"] = "1"
    os.environ["SEEDFARMER_PARAMETER_TIMEOUT_SECONDS"] = "1800"
    os.environ["SEEDFARMER_PARAMETER_VCPUS"] = "2"
    # Unload the app import so that subsequent tests don't reuse
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    # Smoke test: importing app must synthesize without raising.
    import app  # noqa: F401
def test_missing_app_policy(stack_defaults):
    # app.py must refuse to import when the S3 full-access policy ARN is absent.
    del os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_missing_app_ecr_arn(stack_defaults):
    # app.py must refuse to import when the ECR repository ARN is absent.
    del os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_wrong_platform(stack_defaults):
    """Importing app must fail when PLATFORM is neither FARGATE nor EC2."""
    # Plain assignment overwrites the fixture's value; the prior `del` was redundant.
    os.environ["SEEDFARMER_PARAMETER_PLATFORM"] = "GIBBERISH"
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    # A full id/name/version triple yields the "(id) name. Version v" format.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app
    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """Without a version, the description omits the "Version" suffix."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # The fixture never sets SOLUTION_VERSION; a bare `del` raised KeyError when
    # this test ran in isolation. pop(..., None) removes it only if present.
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    import app
    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/tests/test_stack.py | modules/sensor-extraction/ros-to-parquet/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    # SeedFarmer/CDK environment required to construct the stack directly.
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Drop any cached import so each test re-executes stack.py's module body.
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_synthesize_stack(stack_defaults):
    """Synthesize the batch-job stack and verify the rendered job definition."""
    import stack
    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    ros_to_parquet = stack.RosToParquetBatchJob(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        platform="FARGATE",
        ecr_repository_arn="arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        # Fixed malformed ARN: a stray leading apostrophe was embedded in the string.
        s3_access_policy="arn:aws:iam::123456789012:policy/addf-buckets-us-west-2-123-full-access",
        retries=1,
        timeout_seconds=1800,
        vcpus=2,
        memory_limit_mib=8192,
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(ros_to_parquet)
    template.resource_count_is("AWS::Lambda::Function", 1)
    template.resource_count_is("AWS::Batch::JobDefinition", 1)
    template.resource_count_is("AWS::IAM::Role", 2)
    # Job Definition props
    template.has_resource_properties(
        type="AWS::Batch::JobDefinition",
        props={
            "ContainerProperties": {
                "Command": ["bash", "entrypoint.sh"],
                "ReadonlyRootFilesystem": False,
                "ResourceRequirements": [
                    {"Type": "MEMORY", "Value": "8192"},
                    {"Type": "VCPU", "Value": "2"},
                ],
            },
            "PlatformCapabilities": ["FARGATE"],
            "RetryStrategy": {"Attempts": 1},
            "Timeout": {"AttemptDurationSeconds": 1800},
        },
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-parquet/tests/__init__.py | modules/sensor-extraction/ros-to-parquet/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/stack.py | modules/sensor-extraction/ros-to-png/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from typing import Any, Dict, cast
import aws_cdk.aws_batch as batch
import aws_cdk.aws_ecr as ecr
import aws_cdk.aws_ecs as ecs
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Duration, Size, Stack, Tags
from aws_cdk.aws_ecr_assets import DockerImageAsset
from cdk_ecr_deployment import DockerImageName, ECRDeployment
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class RosToPngBatchJob(Stack):
    """CDK stack for the AWS Batch job that extracts PNG frames from ROS bags.

    Builds the local Docker image, copies it into the shared ECR repository,
    creates a single IAM role used as both job and execution role, and
    registers an EC2-based Batch job definition with a host scratch volume
    mounted at /mnt/ebs. The job definition is exposed as ``self.batch_job``.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        ecr_repository_arn: str,
        s3_access_policy: str,
        batch_config: Dict[str, Any],
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        """Create the stack.

        Args:
            scope: Parent CDK construct.
            id: Stack identifier.
            project_name: Seed-Farmer project name (scopes IAM resource ARNs).
            deployment_name: Seed-Farmer deployment name.
            module_name: Seed-Farmer module name.
            ecr_repository_arn: ARN of the ECR repository receiving the image.
            s3_access_policy: ARN of an existing managed policy granting S3 access.
            batch_config: Job settings: ``retries``, ``timeout_seconds``,
                ``vcpus``, ``memory_limit_mib`` and optional
                ``resized_width``/``resized_height``.
            stack_description: CloudFormation stack description.
        """
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment",
            value="aws",
        )
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        repo = ecr.Repository.from_repository_arn(self, "Repository", ecr_repository_arn)
        # Build the container image from ./src next to this file ...
        local_image = DockerImageAsset(
            self,
            "RosToPng",
            directory=os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"),
        )
        image_uri = f"{repo.repository_uri}:latest"
        # ... and copy it into the shared repository under the :latest tag.
        ECRDeployment(
            self,
            "RosToPngURI",
            src=DockerImageName(local_image.image_uri),
            dest=DockerImageName(image_uri),
        )
        # Job permissions: project DynamoDB tables, module ECR repositories,
        # and read-only access to project S3 buckets.
        policy_statements = [
            iam.PolicyStatement(
                actions=["dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:dynamodb:{self.region}:{self.account}:table/{project_name}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject", "s3:GetObjectAcl", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{project_name}-*",
                    f"arn:{self.partition}:s3:::{project_name}-*/*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)
        # Single role assumed by ECS tasks; used below as both the job (task)
        # role and the execution role.
        role = iam.Role(
            self,
            f"{repo.repository_name}-batch-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
                iam.ManagedPolicy.from_managed_policy_arn(self, id="fullaccess", managed_policy_arn=s3_access_policy),
            ],
            max_session_duration=Duration.hours(12),
        )
        batch_env = {
            "AWS_DEFAULT_REGION": self.region,
            "AWS_ACCOUNT_ID": self.account,
            "DEBUG": "true",
        }
        # RESIZE_* environment variables are only exported when the module
        # configuration requests resizing.
        if batch_config.get("resized_width"):
            batch_env["RESIZE_WIDTH"] = str(batch_config["resized_width"])
        if batch_config.get("resized_height"):
            batch_env["RESIZE_HEIGHT"] = str(batch_config["resized_height"])
        self.batch_job = batch.EcsJobDefinition(
            self,
            "batch-job-def-from-ecr",
            container=batch.EcsEc2ContainerDefinition(
                self,
                "batch-container-def",
                image=ecs.ContainerImage.from_ecr_repository(repo, "latest"),
                command=["bash", "entrypoint.sh"],
                environment=batch_env,
                job_role=role,
                execution_role=role,
                memory=Size.mebibytes(batch_config["memory_limit_mib"]),
                cpu=batch_config["vcpus"],
                volumes=[
                    # Host scratch volume for downloading/extracting bags.
                    batch.EcsVolume.host(
                        name="scratch",
                        container_path="/mnt/ebs",
                        readonly=False,
                    ),
                ],
            ),
            job_definition_name=repo.repository_name,
            retry_attempts=batch_config["retries"],
            timeout=Duration.seconds(batch_config["timeout_seconds"]),
        )
        # Run cdk-nag checks and suppress the two findings that are accepted
        # by design for this module.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/app.py | modules/sensor-extraction/ros-to-png/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import RosToPngBatchJob
# Seed-Farmer injects the project/deployment/module names via environment variables.
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
    """Return the Seed-Farmer environment-variable key for parameter *name*."""
    return "SEEDFARMER_PARAMETER_" + name
# Module parameters, read from the environment at import time.
ecr_repository_arn = os.getenv(_param("ECR_REPOSITORY_ARN"))
full_access_policy = os.getenv(_param("FULL_ACCESS_POLICY_ARN"))
retries = int(os.getenv(_param("RETRIES"), 1))
timeout_seconds = int(os.getenv(_param("TIMEOUT_SECONDS"), 60))
vcpus = int(os.getenv(_param("VCPUS"), 4))
memory_limit_mib = int(os.getenv(_param("MEMORY_MIB"), 16384))
resized_width = os.getenv(_param("RESIZED_WIDTH"))
resized_height = os.getenv(_param("RESIZED_HEIGHT"))
batch_config = {
    "retries": retries,
    "timeout_seconds": timeout_seconds,
    "vcpus": vcpus,
    "memory_limit_mib": memory_limit_mib,
}
# Resize dimensions are optional; include them only when provided.
if resized_width:
    batch_config["resized_width"] = int(resized_width)
if resized_height:
    batch_config["resized_height"] = int(resized_height)
# Fail fast at import time when required parameters are absent.
if not ecr_repository_arn:
    raise ValueError("ECR Repository ARN is missing.")
if not full_access_policy:
    raise ValueError("S3 Full Access Policy ARN is missing.")
def generate_description() -> str:
    """Build the CloudFormation stack description, honoring AWS Solution
    metadata (id/name/version) from the environment when present."""
    sid = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    sname = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    sversion = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    if sid and sname:
        # Version is optional; append it only when supplied.
        suffix = f". Version {sversion}" if sversion else ""
        return f"({sid}) {sname}{suffix}"
    # Default description when no solution metadata is configured.
    return "(SO9154) Autonomous Driving Data Framework (ADDF) - ros-to-png"
# Synthesize the stack and expose the Batch job definition ARN as stack
# metadata for downstream modules.
app = App()
stack = RosToPngBatchJob(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    ecr_repository_arn=ecr_repository_arn,
    s3_access_policy=full_access_policy,
    batch_config=batch_config,
    stack_description=generate_description(),
)
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "JobDefinitionArn": stack.batch_job.job_definition_arn,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/src/main.py | modules/sensor-extraction/ros-to-png/src/main.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import concurrent
import json
import logging
import os
import shutil
import sys
import time
import zipfile
import boto3
import cv2
import requests
from cv_bridge import CvBridge
from rclpy.serialization import deserialize_message
from rosbag2_py import ConverterOptions, SequentialReader, StorageOptions
from rosidl_runtime_py.utilities import get_message
# Verbose log format carrying file/line/thread context.
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d][%(levelname)s][%(threadName)s] %(message)s"
# DEBUG env var toggles debug-level logging ("true"/"yes"/"1", case-insensitive).
debug = os.environ.get("DEBUG", "False").lower() in [
    "true",
    "yes",
    "1",
]
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(level=level, format=DEBUG_LOGGING_FORMAT)
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(level)
if debug:
    # Keep the AWS SDK / HTTP libraries quiet even in debug mode.
    logging.getLogger("boto3").setLevel(logging.ERROR)
    logging.getLogger("botocore").setLevel(logging.ERROR)
    logging.getLogger("urllib3").setLevel(logging.ERROR)
    logging.getLogger("s3transfer").setLevel(logging.CRITICAL)
class VideoFromBag:
    """Log the ffmpeg command that would stitch a topic's extracted PNG frames
    into an MP4.

    NOTE(review): this class only logs the command — no subprocess is launched,
    so no video file is actually produced here.
    """

    def __init__(self, topic, images_path):
        # bridge is created but not used by this class.
        self.bridge = CvBridge()
        output_dir = os.path.join(images_path, topic.replace("/", "_"))
        self.video = f"/tmp/{topic.replace('/', '_')}/video.mp4"
        logger.info("Get video for topic {}".format(topic))
        logger.info(
            f"""ffmpeg -r 20 -f image2 -i {output_dir}/frame_%06d.png \
            -vcodec libx264 -crf 25 -pix_fmt yuv420p {self.video}"""
        )
class ImageFromBag:
    """Extract PNG frames for a single topic from a rosbag2 (sqlite3) recording.

    After construction, ``self.files`` holds one dict per written frame with
    keys: ``local_image_path``, ``timestamp``, ``seq``, ``topic`` and
    ``s3_image_name``.
    """

    def __init__(
        self,
        topic,
        encoding,
        ros2_path,
        output_path,
        resized_width=None,
        resized_height=None,
    ):
        """Read the bag at *ros2_path* and write frames for *topic*.

        Args:
            topic: ROS topic name to extract.
            encoding: Desired cv_bridge encoding for the converted frames.
            ros2_path: Directory of the rosbag2 recording (sqlite3 storage).
            output_path: Root directory that receives per-topic frame folders.
            resized_width: Optional target width; resizing happens only when
                both width and height are given.
            resized_height: Optional target height.
        """
        self.bridge = CvBridge()
        # One output folder per topic; "/" in the topic name becomes "_".
        output_dir = os.path.join(output_path, topic.replace("/", "_"))
        resize = resized_width is not None and resized_height is not None
        if resize:
            output_dir = output_dir + f"_resized_{resized_width}_{resized_height}"
        logger.info(output_dir)
        os.makedirs(output_dir, exist_ok=True)
        files = []
        storage_options = StorageOptions(uri=ros2_path, storage_id="sqlite3")
        converter_options = ConverterOptions("", "")
        reader = SequentialReader()
        reader.open(storage_options, converter_options)
        topic_types = reader.get_all_topics_and_types()
        # Map topic name -> message type string so payloads can be deserialized.
        type_map = {topic_types[i].name: topic_types[i].type for i in range(len(topic_types))}
        seq = 0
        while reader.has_next():
            (topic_name, data, timestamp) = reader.read_next()
            if topic_name == topic:
                msg_type = get_message(type_map[topic_name])
                msg = deserialize_message(data, msg_type)
                timestamp_str = str(timestamp)
                seq_str = "{:07d}".format(seq)
                cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding=encoding)
                if resize:
                    cv_image = cv2.resize(cv_image, (resized_width, resized_height))
                # Local files are named by sequence only; the S3 name also
                # carries the message timestamp.
                local_image_name = "frame_{}.png".format(seq_str)
                s3_image_name = "frame_{}_{}.png".format(seq_str, timestamp_str)
                im_out_path = os.path.join(output_dir, local_image_name)
                logger.info("Write image: {} to {}".format(local_image_name, im_out_path))
                cv2.imwrite(im_out_path, cv_image)
                # Resized variants are recorded under a distinct topic label.
                topic_name = topic_name + f"_resized_{resized_width}_{resized_height}" if resize else topic_name
                files.append(
                    {
                        "local_image_path": im_out_path,
                        "timestamp": timestamp_str,
                        "seq": seq_str,
                        "topic": topic_name,
                        "s3_image_name": s3_image_name,
                    }
                )
                seq += 1
        reader.close()
        self.files = files
def upload_file(client, local_image_path, bucket_name, target):
    """Upload one local file to ``s3://bucket_name/target`` via *client*."""
    client.upload_file(local_image_path, bucket_name, target)
def upload(client, bucket_name, drive_id, file_id, files):
    """Upload extracted frame files to S3 in parallel.

    Args:
        client: boto3 S3 client used for the uploads.
        bucket_name: Destination bucket.
        drive_id: Drive identifier; first component of the S3 key prefix.
        file_id: Recording file id; a ".bag" suffix is stripped for the prefix.
        files: Dicts with "local_image_path", "topic" and "s3_image_name" keys
            (as produced by ImageFromBag).

    Returns:
        The distinct S3 key prefixes that received uploads (unordered).
    """
    # Imported locally: the module header only does `import concurrent`, which
    # does not guarantee the `concurrent.futures` submodule is loaded (it
    # previously worked only because boto3 imports it as a side effect).
    import concurrent.futures

    target_prefixes = set()
    items = []
    for file in files:
        topic = file["topic"].replace("/", "_")
        target_prefix = os.path.join(drive_id, file_id.replace(".bag", ""), topic)
        items.append(
            {
                "local_image_path": file["local_image_path"],
                "bucket_name": bucket_name,
                "target": os.path.join(target_prefix, file["s3_image_name"]),
            }
        )
        target_prefixes.add(target_prefix)

    # Context manager guarantees the worker threads are shut down; the
    # original executor was never shut down (thread leak per call).
    with concurrent.futures.ThreadPoolExecutor(100) as executor:
        futures = [
            executor.submit(
                upload_file,
                client,
                item["local_image_path"],
                item["bucket_name"],
                item["target"],
            )
            for item in items
        ]
        concurrent.futures.wait(futures)
    return list(target_prefixes)
def get_log_path():
    """Return (log region, log stream) for this ECS task, with "/" in the
    stream name escaped as "$252F" for use in a console URL."""
    response = requests.get(f"{os.environ['ECS_CONTAINER_METADATA_URI_V4']}", timeout=10)
    log_options = response.json()["LogOptions"]
    region = log_options["awslogs-region"]
    stream = log_options["awslogs-stream"].replace("/", "$252F")
    return region, stream
def save_job_url_and_logs(table, drive_id, file_id, batch_id, index):
    """Record the Batch-job console URL and CloudWatch-logs URL on both the
    per-file and the per-batch DynamoDB items."""
    job_region, log_path = get_log_path()
    job_url = (
        f"https://{job_region}.console.aws.amazon.com/batch/home?region={job_region}#jobs/detail/"
        f"{os.environ['AWS_BATCH_JOB_ID']}"
    )
    job_cloudwatch_logs = (
        f"https://{job_region}.console.aws.amazon.com/cloudwatch/home?region={job_region}#"
        f"logsV2:log-groups/log-group/$252Faws$252Fbatch$252Fjob/log-events/{log_path}"
    )
    # Identical attributes are written to both tracking items.
    for pk, sk in ((drive_id, file_id), (batch_id, index)):
        table.update_item(
            Key={"pk": pk, "sk": sk},
            UpdateExpression="SET image_extraction_batch_job = :batch_url, image_extraction_job_logs = :cloudwatch_logs",
            ExpressionAttributeValues={
                ":cloudwatch_logs": job_cloudwatch_logs,
                ":batch_url": job_url,
            },
        )
def extract_images(ros2_path, topic, resized_width, resized_height, encoding, images_path):
    """Extract PNG frames for *topic* from the bag at *ros2_path*, plus a
    resized variant when both dimensions are given.

    Extraction errors are logged and swallowed; returns whatever frame
    metadata was collected (possibly an empty list).
    """
    collected = []
    logger.info(f"Getting images from topic: {topic} with encoding {encoding}")
    try:
        raw = ImageFromBag(topic, encoding, ros2_path, images_path)
        collected.extend(raw.files)
        logger.info(f"Raw Images extracted from topic: {topic} with encoding {encoding}")
        if resized_width and resized_height:
            logger.info(
                f"Resized Images extracted from topic: {topic} with encoding {encoding}"
                f" with new size {resized_width} x {resized_height}"
            )
            resized = ImageFromBag(topic, encoding, ros2_path, images_path, resized_width, resized_height)
            collected.extend(resized.files)
        logger.info(f"Images extracted from topic: {topic} with encoding {encoding}")
    except Exception as e:
        logger.error(f"Error processing ROS2 data: {e}")
    return collected
def main(table_name, index, batch_id, zip_path, images_path, topics, encoding, target_bucket) -> int:
    """Download a zipped rosbag2 recording, extract PNG frames per topic,
    upload them to S3, and record job status in DynamoDB.

    Args:
        table_name: DynamoDB tracking-table name.
        index: Array index of this job within the batch (sort key of the batch item).
        batch_id: Batch identifier (partition key of the batch item).
        zip_path: Local path to download the zip archive to; its parent
            directory is used as the scratch area and removed on success.
        images_path: Local directory that receives extracted PNG frames.
        topics: ROS topic names to extract images from.
        encoding: Desired cv_bridge image encoding.
        target_bucket: S3 bucket receiving the extracted frames.

    Returns:
        0 on success.

    Raises:
        ValueError: If the (batch_id, index) item is missing from the table.
    """
    logger.info("batch_id: %s", batch_id)
    logger.info("index: %s", index)
    logger.info("table_name: %s", table_name)
    logger.info("zip_path: %s", zip_path)
    logger.info("images_path: %s", images_path)
    logger.info("topics: %s", topics)
    logger.info("encoding: %s", encoding)
    logger.info("target_bucket: %s", target_bucket)

    # Fixed: RESIZE_WIDTH/RESIZE_HEIGHT are only set on the Batch job when the
    # module is configured with a resize target (see stack.py), so treat them
    # as optional instead of raising KeyError when absent.
    resize_width_env = os.environ.get("RESIZE_WIDTH")
    resize_height_env = os.environ.get("RESIZE_HEIGHT")
    resized_width = int(resize_width_env) if resize_width_env else None
    resized_height = int(resize_height_env) if resize_height_env else None
    logger.info("resized_width: %s", resized_width)
    logger.info("resized_height: %s", resized_height)

    dynamodb = boto3.resource("dynamodb")
    table = dynamodb.Table(table_name)
    item = table.get_item(
        Key={"pk": batch_id, "sk": index},
    ).get("Item", {})
    logger.info("Item Pulled: %s", item)
    if not item:
        raise ValueError(f"pk: {batch_id} sk: {index} not existing in table: {table_name}")

    drive_id = item["drive_id"]
    file_id = item["file_id"]
    s3 = boto3.client("s3")
    # Record console/log URLs on the tracking items before doing the work.
    save_job_url_and_logs(table, drive_id, file_id, batch_id, index)

    logger.info("Downloading zip file")
    s3.download_file(item["s3_bucket"], item["s3_key"], zip_path)
    logger.info(f"Zip downloaded to {zip_path}")
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        zip_ref.extractall(os.path.dirname(zip_path))
    logger.info(f"Extracted zip to {os.path.dirname(zip_path)}")
    file_name = item["file_id"].replace(".zip", "")
    ros2_path = f"{os.path.dirname(zip_path)}/{file_name}"
    logger.info(f"ROS2 directory at {ros2_path}")

    uploaded_directories = []
    for topic in topics:
        all_files = extract_images(ros2_path, topic, resized_width, resized_height, encoding, images_path)
        logger.info(f"Uploading results - {target_bucket}")
        uploaded_directories += upload(s3, target_bucket, drive_id, file_id, all_files)
        logger.info("Uploaded results")

    # Split uploads into raw vs resized prefixes by the "_resized_" label that
    # ImageFromBag appends to resized topic folders.
    raw_image_dirs = [d for d in uploaded_directories if "resized" not in d]
    resized_image_dirs = [d for d in uploaded_directories if "resized" in d]

    logger.info("Writing job status to DynamoDB")
    table.update_item(
        Key={"pk": item["drive_id"], "sk": item["file_id"]},
        UpdateExpression="SET "
        "image_extraction_status = :status, "
        "raw_image_dirs = :raw_image_dirs, "
        "resized_image_dirs = :resized_image_dirs, "
        "raw_image_bucket = :raw_image_bucket, "
        "s3_key = :s3_key, "
        "s3_bucket = :s3_bucket,"
        "batch_id = :batch_id,"
        "array_index = :index,"
        "drive_id = :drive_id,"
        "file_id = :file_id",
        ExpressionAttributeValues={
            ":status": "success",
            ":raw_image_dirs": raw_image_dirs,
            ":resized_image_dirs": resized_image_dirs,
            ":raw_image_bucket": target_bucket,
            ":batch_id": batch_id,
            ":index": index,
            ":s3_key": item["s3_key"],
            ":s3_bucket": item["s3_bucket"],
            ":drive_id": item["drive_id"],
            ":file_id": item["file_id"],
        },
    )
    table.update_item(
        Key={"pk": batch_id, "sk": index},
        UpdateExpression="SET "
        "image_extraction_status = :status, "
        "raw_image_dirs = :raw_image_dirs, "
        "resized_image_dirs = :resized_image_dirs, "
        "raw_image_bucket = :raw_image_bucket, "
        "s3_key = :s3_key, "
        "s3_bucket = :s3_bucket,"
        "batch_id = :batch_id,"
        "array_index = :index",
        ExpressionAttributeValues={
            ":status": "success",
            ":raw_image_dirs": raw_image_dirs,
            ":resized_image_dirs": resized_image_dirs,
            ":raw_image_bucket": target_bucket,
            ":batch_id": batch_id,
            ":index": index,
            ":s3_key": item["s3_key"],
            ":s3_bucket": item["s3_bucket"],
        },
    )

    # Fixed: the original called shutil.rmtree(local_dir), relying on a global
    # defined only in the __main__ block (NameError when main() is imported).
    # Derive the scratch directory from zip_path instead.
    shutil.rmtree(os.path.dirname(zip_path))
    return 0
if __name__ == "__main__":
# Arguments passed from DAG Code
parser = argparse.ArgumentParser(description="Process Files")
parser.add_argument("--tablename", required=True)
parser.add_argument("--index", required=True)
parser.add_argument("--batchid", required=True)
parser.add_argument("--imagetopics", required=True)
parser.add_argument("--desiredencoding", required=True)
parser.add_argument("--targetbucket", required=True)
args = parser.parse_args()
unique_id = f"{args.index}_{str(int(time.time()))}"
local_dir = f"/mnt/ebs/{unique_id}"
logger.debug("ARGS: %s", args)
os.mkdir(local_dir)
sys.exit(
main(
batch_id=args.batchid,
index=args.index,
table_name=args.tablename,
zip_path=f"{local_dir}/ros.zip",
images_path=f"{local_dir}/images/",
topics=json.loads(args.imagetopics),
encoding=args.desiredencoding,
target_bucket=args.targetbucket,
)
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/tests/test_app.py | modules/sensor-extraction/ros-to-png/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Provide the environment variables app.py reads at import time."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"] = (
        "arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository"
    )
    os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"] = (
        "arn:aws:iam::123456789012:policy/addf-aws-solutions-wip-policy-full-access"
    )
    os.environ["SEEDFARMER_PARAMETER_MEMORY_MIB"] = "8192"
    os.environ["SEEDFARMER_PARAMETER_PLATFORM"] = "FARGATE"
    os.environ["SEEDFARMER_PARAMETER_RESIZED_HEIGHT"] = "720"
    os.environ["SEEDFARMER_PARAMETER_RESIZED_WIDTH"] = "1280"
    os.environ["SEEDFARMER_PARAMETER_RETRIES"] = "1"
    os.environ["SEEDFARMER_PARAMETER_TIMEOUT_SECONDS"] = "1800"
    os.environ["SEEDFARMER_PARAMETER_VCPUS"] = "2"
    # Unload the app import so that subsequent tests don't reuse
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    """app.py imports (and synthesizes) cleanly with all parameters present."""
    import app  # noqa: F401
def test_missing_app_policy(stack_defaults):
    """Importing app without FULL_ACCESS_POLICY_ARN raises ValueError."""
    del os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_missing_app_ecr_arn(stack_defaults):
    """Importing app without ECR_REPOSITORY_ARN raises ValueError."""
    del os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    """Description carries solution id, name and version when all are set."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """Description omits the version suffix when SOLUTION_VERSION is unset."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # Fixed: use pop(..., None) instead of del — SOLUTION_VERSION is only set
    # by a sibling test (the fixture never sets or cleans it), so a bare del
    # raised KeyError when this test ran first or alone.
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/tests/test_stack.py | modules/sensor-extraction/ros-to-png/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Set the Seed-Farmer/CDK environment variables that stack.py reads at import time."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-deployment"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-module"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Drop any cached module so each test re-imports a fresh copy of stack.py
    if "stack" in sys.modules:
        del sys.modules["stack"]
def test_synthesize_stack(stack_defaults):
    """Synthesize RosToPngBatchJob with resize settings and verify the Batch
    job definition's mounts, volumes and resource requirements."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    batch_config = {
        "retries": 1,
        "timeout_seconds": 1800,
        "vcpus": 2,
        "memory_limit_mib": 8192,
        "resized_width": 1280,
        "resized_height": 720,
    }
    ros_to_png = stack.RosToPngBatchJob(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        ecr_repository_arn="arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        s3_access_policy="arn:aws:iam::123456789012:policy/addf-buckets-us-west-2-123-full-access",
        batch_config=batch_config,
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(ros_to_png)
    # Expected resource counts for the synthesized stack.
    template.resource_count_is("AWS::Lambda::Function", 1)
    template.resource_count_is("AWS::Batch::JobDefinition", 1)
    template.resource_count_is("AWS::IAM::Role", 2)
    # Check batch job definition properties
    template.has_resource_properties(
        type="AWS::Batch::JobDefinition",
        props={
            "ContainerProperties": {
                "MountPoints": [
                    {
                        "ContainerPath": "/mnt/ebs",
                        "ReadOnly": False,
                        "SourceVolume": "scratch",
                    }
                ],
                "ReadonlyRootFilesystem": False,
                "ResourceRequirements": [
                    {"Type": "MEMORY", "Value": "8192"},
                    {"Type": "VCPU", "Value": "2"},
                ],
                "Volumes": [{"Name": "scratch"}],
            }
        },
    )
def test_synthesize_stack_without_resize(stack_defaults):
    """Stack also synthesizes when batch_config omits resized_width/height."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    # No resized_width/resized_height: the resize env vars must be optional.
    batch_config = {
        "retries": 1,
        "timeout_seconds": 1800,
        "vcpus": 2,
        "memory_limit_mib": 8192,
    }
    ros_to_png = stack.RosToPngBatchJob(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        ecr_repository_arn="arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        s3_access_policy="arn:aws:iam::123456789012:policy/addf-buckets-us-west-2-123-full-access",
        batch_config=batch_config,
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(ros_to_png)
    # Expected resource counts for the synthesized stack.
    template.resource_count_is("AWS::Lambda::Function", 1)
    template.resource_count_is("AWS::Batch::JobDefinition", 1)
    template.resource_count_is("AWS::IAM::Role", 2)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/sensor-extraction/ros-to-png/tests/__init__.py | modules/sensor-extraction/ros-to-png/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/studio_stack.py | modules/beta/emrstudio-on-eks/studio_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# type: ignore
import random
from typing import List, cast
from aws_cdk import CfnOutput, Stack, Tags
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_emr as emr
from aws_cdk import aws_emrcontainers as emrc
from aws_cdk import aws_iam as iam
from aws_cdk import aws_s3 as s3
from aws_cdk import custom_resources as custom
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
from OpenSSL import crypto
"""
This stack deploys the following:
- EMR on EKS virtual cluster
- EMR Studio
"""
class StudioLiveStack(Stack):
def __init__(
self,
scope: Construct,
id: str,
project: str,
deployment: str,
module: str,
vpc_id: str,
private_subnet_ids: List[str],
artifact_bucket_name: str,
eks_cluster_name: str,
execution_role_arn: str,
emr_namespace: str,
sso_username: str,
**kwargs,
) -> None:
super().__init__(scope, id, description="This stack deploys EMR Studio", **kwargs)
dep_mod = f"{project}-{deployment}-{module}"
dep_mod = dep_mod[:27]
Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
# EMR virtual cluster
self.emr_vc = emrc.CfnVirtualCluster(
scope=self,
id=f"{dep_mod}-EMRVirtualCluster",
container_provider=emrc.CfnVirtualCluster.ContainerProviderProperty(
id=eks_cluster_name,
info=emrc.CfnVirtualCluster.ContainerInfoProperty(
eks_info=emrc.CfnVirtualCluster.EksInfoProperty(namespace=emr_namespace)
),
type="EKS",
),
name=f"{dep_mod}-EMRCluster",
)
# policy to let Lambda invoke the api
custom_policy_document = iam.PolicyDocument(
statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
"ec2:CreateSecurityGroup",
"ec2:RevokeSecurityGroupEgress",
"ec2:CreateSecurityGroup",
"ec2:DeleteSecurityGroup",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:RevokeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
],
resources=["*"],
)
]
)
managed_policy = iam.ManagedPolicy(self, f"{id}-ManagedPolicy", document=custom_policy_document)
self.role = iam.Role(
scope=self,
id=f"{id}-LambdaRole",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
managed_policy,
],
)
# cert for endpoint
crt, pkey = self.cert_gen(serialNumber=random.randint(1000, 10000))
mycert = custom.AwsCustomResource(
self,
f"{id}-CreateCert",
on_update={
"service": "ACM",
"action": "importCertificate",
"parameters": {
"Certificate": crt.decode("utf-8"),
"PrivateKey": pkey.decode("utf-8"),
},
"physical_resource_id": custom.PhysicalResourceId.from_response("CertificateArn"),
},
policy=custom.AwsCustomResourcePolicy.from_sdk_calls(resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
role=self.role,
function_name="CreateCertFn",
)
# Set up managed endpoint for Studio
endpoint = custom.AwsCustomResource(
self,
f"{id}-CreateEndpoint",
on_create={
"service": "EMRcontainers",
"action": "createManagedEndpoint",
"parameters": {
"certificateArn": mycert.get_response_field("CertificateArn"),
"executionRoleArn": execution_role_arn,
"name": "emr-endpoint-eks-spark",
"releaseLabel": "emr-6.2.0-latest",
"type": "JUPYTER_ENTERPRISE_GATEWAY",
"virtualClusterId": self.emr_vc.attr_id,
},
"physical_resource_id": custom.PhysicalResourceId.from_response("arn"),
},
policy=custom.AwsCustomResourcePolicy.from_sdk_calls(resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
role=self.role,
function_name="CreateEpFn",
)
endpoint.node.add_dependency(mycert)
# Studio live
# ArtifactBucket for backing Workspace and notebook files
bucket = s3.Bucket.from_bucket_name(self, f"{id}-artifacts-bucket", artifact_bucket_name)
self.vpc_id = vpc_id
self.vpc = ec2.Vpc.from_lookup(
self,
"VPC",
vpc_id=vpc_id,
)
self.private_subnets = []
for idx, subnet_id in enumerate(private_subnet_ids):
self.private_subnets.append(ec2.Subnet.from_subnet_id(scope=self, id=f"subnet{idx}", subnet_id=subnet_id))
# Create security groups
eng_sg = ec2.SecurityGroup(
self,
"EngineSecurityGroup",
vpc=self.vpc,
description="EMR Studio Engine",
allow_all_outbound=True,
)
Tags.of(eng_sg).add("for-use-with-amazon-emr-managed-policies", "true")
ws_sg = ec2.SecurityGroup(
self,
"WorkspaceSecurityGroup",
vpc=self.vpc,
description="EMR Studio Workspace",
allow_all_outbound=False,
)
Tags.of(ws_sg).add("for-use-with-amazon-emr-managed-policies", "true")
ws_sg.add_egress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443), "allow egress on port 443")
ws_sg.add_egress_rule(eng_sg, ec2.Port.tcp(18888), "allow egress on port 18888 to eng")
eng_sg.add_ingress_rule(ws_sg, ec2.Port.tcp(18888), "allow ingress on port 18888 from ws")
# Studio Service Role
role = iam.Role(
self,
"StudioServiceRole",
assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess")],
)
Tags.of(role).add("for-use-with-amazon-emr-managed-policies", "true")
role.add_to_policy(
iam.PolicyStatement(
resources=["*"],
actions=[
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeSecurityGroups",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface",
"ec2:DeleteNetworkInterfacePermission",
"ec2:DescribeNetworkInterfaces",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:DescribeTags",
"ec2:DescribeInstances",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"elasticmapreduce:ListInstances",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListSteps",
],
effect=iam.Effect.ALLOW,
)
)
# Studio User Role
user_role = iam.Role(
self,
"StudioUserRole",
assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
)
Tags.of(role).add("for-use-with-amazon-emr-managed-policies", "true")
user_role.add_to_policy(
iam.PolicyStatement(
actions=[
"elasticmapreduce:CreateEditor",
"elasticmapreduce:DescribeEditor",
"elasticmapreduce:ListEditors",
"elasticmapreduce:StartEditor",
"elasticmapreduce:StopEditor",
"elasticmapreduce:DeleteEditor",
"elasticmapreduce:OpenEditorInConsole",
"elasticmapreduce:AttachEditor",
"elasticmapreduce:DetachEditor",
"elasticmapreduce:CreateRepository",
"elasticmapreduce:DescribeRepository",
"elasticmapreduce:DeleteRepository",
"elasticmapreduce:ListRepositories",
"elasticmapreduce:LinkRepository",
"elasticmapreduce:UnlinkRepository",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListSteps",
"elasticmapreduce:CreatePersistentAppUI",
"elasticmapreduce:DescribePersistentAppUI",
"elasticmapreduce:GetPersistentAppUIPresignedURL",
"secretsmanager:CreateSecret",
"secretsmanager:ListSecrets",
"secretsmanager:TagResource",
"emr-containers:DescribeVirtualCluster",
"emr-containers:ListVirtualClusters",
"emr-containers:DescribeManagedEndpoint",
"emr-containers:ListManagedEndpoints",
"emr-containers:CreateAccessTokenForManagedEndpoint",
"emr-containers:DescribeJobRun",
"emr-containers:ListJobRuns",
],
resources=["*"],
effect=iam.Effect.ALLOW,
)
)
user_role.add_to_policy(
iam.PolicyStatement(
resources=["*"],
actions=[
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ProvisionProduct",
"servicecatalog:SearchProducts",
"servicecatalog:UpdateProvisionedProduct",
"servicecatalog:ListProvisioningArtifacts",
"servicecatalog:DescribeRecord",
"cloudformation:DescribeStackResources",
"kms:Decrypt",
"kms:GenerateDataKey",
"kms:ReEncryptFrom",
"kms:ReEncryptTo",
"kms:DescribeKey",
],
effect=iam.Effect.ALLOW,
)
)
user_role.add_to_policy(
iam.PolicyStatement(
resources=["*"],
actions=["elasticmapreduce:RunJobFlow"],
effect=iam.Effect.ALLOW,
)
)
user_role.add_to_policy(
iam.PolicyStatement(
resources=[
role.role_arn,
f"arn:{self.partition}:iam::{self.account}:role/EMR_DefaultRole",
f"arn:{self.partition}:iam::{self.account}:role/EMR_EC2_DefaultRole",
],
actions=["iam:PassRole"],
effect=iam.Effect.ALLOW,
)
)
user_role.add_to_policy(
iam.PolicyStatement(
resources=[f"arn:{self.partition}:s3:::*"],
actions=[
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation",
],
effect=iam.Effect.ALLOW,
)
)
user_role.add_to_policy(
iam.PolicyStatement(
resources=[
f"arn:{self.partition}:s3:::{bucket.bucket_name}",
f"arn:{self.partition}:s3:::{bucket.bucket_name}/*",
f"arn:{self.partition}:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*",
],
actions=["s3:GetObject"],
effect=iam.Effect.ALLOW,
)
)
# User Session Mapping permissions
policy_document = {
"Version": "2012-10-17T00:00:00.000Z",
"Statement": [
{
"Action": [
"elasticmapreduce:CreateEditor",
"elasticmapreduce:DescribeEditor",
"elasticmapreduce:ListEditors",
"elasticmapreduce:StartEditor",
"elasticmapreduce:StopEditor",
"elasticmapreduce:DeleteEditor",
"elasticmapreduce:OpenEditorInConsole",
"elasticmapreduce:AttachEditor",
"elasticmapreduce:DetachEditor",
"elasticmapreduce:CreateRepository",
"elasticmapreduce:DescribeRepository",
"elasticmapreduce:DeleteRepository",
"elasticmapreduce:ListRepositories",
"elasticmapreduce:LinkRepository",
"elasticmapreduce:UnlinkRepository",
"elasticmapreduce:DescribeCluster",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListSteps",
"elasticmapreduce:CreatePersistentAppUI",
"elasticmapreduce:DescribePersistentAppUI",
"elasticmapreduce:GetPersistentAppUIPresignedURL",
"secretsmanager:CreateSecret",
"secretsmanager:ListSecrets",
"emr-containers:DescribeVirtualCluster",
"emr-containers:ListVirtualClusters",
"emr-containers:DescribeManagedEndpoint",
"emr-containers:ListManagedEndpoints",
"emr-containers:CreateAccessTokenForManagedEndpoint",
"emr-containers:DescribeJobRun",
"emr-containers:ListJobRuns",
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowBasicActions",
},
{
"Action": [
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ProvisionProduct",
"servicecatalog:SearchProducts",
"servicecatalog:UpdateProvisionedProduct",
"servicecatalog:ListProvisioningArtifacts",
"servicecatalog:DescribeRecord",
"cloudformation:DescribeStackResources",
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowIntermediateActions",
},
{
"Action": ["elasticmapreduce:RunJobFlow"],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowAdvancedActions",
},
{
"Action": "iam:PassRole",
"Resource": [
role.role_arn,
f"arn:{self.partition}:iam::{self.account}:role/EMR_DefaultRole",
f"arn:{self.partition}:iam::{self.account}:role/EMR_EC2_DefaultRole",
],
"Effect": "Allow",
"Sid": "PassRolePermission",
},
{
"Action": [
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation",
],
"Resource": f"arn:{self.partition}:s3:::*",
"Effect": "Allow",
"Sid": "AllowS3ListAndLocationPermissions",
},
{
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:DeleteObject",
],
"Resource": [
f"arn:{self.partition}:s3:::{bucket.bucket_name}",
f"arn:{self.partition}:s3:::{bucket.bucket_name}/*",
f"arn:{self.partition}:s3:::aws-logs-{self.account}-{self.region}/elasticmapreduce/*",
],
"Effect": "Allow",
"Sid": "AllowS3ReadOnlyAccessToLogs",
},
],
}
custom_policy_document = iam.PolicyDocument.from_json(policy_document)
studio_user_session_policy = iam.ManagedPolicy(self, "StudioUserSessionPolicy", document=custom_policy_document)
# Set up Studio
self.studio = emr.CfnStudio(
self,
f"{id}-EmrStudio",
auth_mode="SSO",
default_s3_location=f"s3://{bucket.bucket_name}/studio/",
engine_security_group_id=eng_sg.security_group_id,
name=f"{id}-EmrStudio",
service_role=role.role_arn,
subnet_ids=private_subnet_ids,
user_role=user_role.role_arn,
vpc_id=vpc_id,
workspace_security_group_id=ws_sg.security_group_id,
description=None,
tags=None,
)
CfnOutput(self, id="StudioUrl", value=self.studio.attr_url)
# Create session mapping
emr.CfnStudioSessionMapping(
self,
f"{id}-StudioSM",
identity_name=sso_username,
identity_type="USER",
session_policy_arn=studio_user_session_policy.managed_policy_arn,
studio_id=self.studio.attr_studio_id,
)
NagSuppressions.add_stack_suppressions(
self,
apply_to_nested_stacks=True,
suppressions=[
{
"id": "AwsSolutions-IAM4",
"reason": "Managed Policies are for service account roles only",
},
{
"id": "AwsSolutions-IAM5",
"reason": "Resource access restriced to ADDF resources",
},
{
"id": "AwsSolutions-L1",
"reason": "Not creating the Lambda directly",
},
],
)
def cert_gen(
    self,
    emailAddress="emailAddress",
    commonName="emroneks.com",
    countryName="NT",
    localityName="localityName",
    stateOrProvinceName="stateOrProvinceName",
    organizationName="organizationName",
    organizationUnitName="organizationUnitName",
    serialNumber=1234,
    validityStartInSeconds=0,
    validityEndInSeconds=10 * 365 * 24 * 60 * 60,
    KEY_FILE="private.key",
    CERT_FILE="selfsigned.crt",
):
    """Generate a self-signed X.509 certificate and its RSA-2048 private key.

    Returns:
        Tuple of PEM-encoded bytes ``(certificate, private_key)``.

    Notes:
        - Fix: ``validityStartInSeconds`` was previously ignored (notBefore
          was hard-coded to 0); it is now honored. The default of 0 keeps
          existing callers' behavior unchanged.
        - ``KEY_FILE``/``CERT_FILE`` are accepted for backward compatibility
          but unused — nothing is written to disk; callers receive the PEM
          bytes and persist them as needed.
        - Signed with SHA-512; issuer == subject (self-signed).
    """
    # Create an RSA-2048 key pair.
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 2048)
    # Build the self-signed certificate from the supplied subject fields.
    cert = crypto.X509()
    cert.get_subject().C = countryName
    cert.get_subject().ST = stateOrProvinceName
    cert.get_subject().L = localityName
    cert.get_subject().O = organizationName  # noqa: E741
    cert.get_subject().OU = organizationUnitName
    cert.get_subject().CN = commonName
    cert.get_subject().emailAddress = emailAddress
    cert.set_serial_number(serialNumber)
    # Validity window relative to "now", in seconds.
    cert.gmtime_adj_notBefore(validityStartInSeconds)
    cert.gmtime_adj_notAfter(validityEndInSeconds)
    # Self-signed: the issuer is the subject itself, signed with its own key.
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.sign(k, "sha512")
    return (
        crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
        crypto.dump_privatekey(crypto.FILETYPE_PEM, k),
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/rbac_stack.py | modules/beta/emrstudio-on-eks/rbac_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
import logging
from typing import Any, cast
from aws_cdk import CfnJson, Stack, Tags
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
"""
This stack deploys the following:
- EKS RBAC configuration to support EMR on EKS
"""
class EmrEksRbacStack(Stack):
    """Configures an existing EKS cluster for EMR on EKS.

    Creates the EMR namespace, the Kubernetes Role/RoleBinding that the
    ``emr-containers`` service user needs, and the IAM job execution role
    (with an IRSA trust policy) that EMR jobs assume.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project: str,
        deployment: str,
        module: str,
        eks_cluster_name: str,
        eks_admin_role_arn: str,
        eks_oidc_arn: str,
        eks_openid_issuer: str,
        emr_namespace: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack deploys EMR Studio RBAC Configuration",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
        dep_mod = f"{project}-{deployment}-{module}"
        # Truncate so names derived from dep_mod stay within AWS length limits.
        dep_mod = dep_mod[:27]
        # Import EKS Cluster (pre-existing; referenced, not created here).
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_admin_role_arn,
            open_id_connect_provider=provider,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )
        self.emr_namespace = emr_namespace
        # Service-linked role ARN for EMR containers (exposed for consumers of this stack).
        self.emrsvcrolearn = f"arn:{self.partition}:iam::{self.account}:role/AWSServiceRoleForAmazonEMRContainers"
        # Create namespaces for EMR to use
        namespace = eks_cluster.add_manifest(
            self.emr_namespace,
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {"name": self.emr_namespace},
            },
        )
        # Create k8s cluster role for EMR — the permission set EMR on EKS
        # requires inside the namespace (pods, jobs, deployments, RBAC, ...).
        emrrole = eks_cluster.add_manifest(
            "emrrole",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "Role",
                "metadata": {"name": "emr-containers", "namespace": self.emr_namespace},
                "rules": [
                    {"apiGroups": [""], "resources": ["namespaces"], "verbs": ["get"]},
                    {
                        "apiGroups": [""],
                        "resources": [
                            "serviceaccounts",
                            "services",
                            "configmaps",
                            "events",
                            "pods",
                            "pods/log",
                        ],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "describe",
                            "create",
                            "edit",
                            "delete",
                            "deletecollection",
                            "annotate",
                            "patch",
                            "label",
                        ],
                    },
                    {
                        "apiGroups": [""],
                        "resources": ["secrets"],
                        "verbs": ["create", "patch", "delete", "watch"],
                    },
                    {
                        "apiGroups": ["apps"],
                        "resources": ["statefulsets", "deployments"],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "describe",
                            "create",
                            "edit",
                            "delete",
                            "annotate",
                            "patch",
                            "label",
                        ],
                    },
                    {
                        "apiGroups": ["batch"],
                        "resources": ["jobs"],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "describe",
                            "create",
                            "edit",
                            "delete",
                            "annotate",
                            "patch",
                            "label",
                        ],
                    },
                    {
                        "apiGroups": ["extensions"],
                        "resources": ["ingresses"],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "describe",
                            "create",
                            "edit",
                            "delete",
                            "annotate",
                            "patch",
                            "label",
                        ],
                    },
                    {
                        "apiGroups": ["rbac.authorization.k8s.io"],
                        "resources": ["roles", "rolebindings"],
                        "verbs": [
                            "get",
                            "list",
                            "watch",
                            "describe",
                            "create",
                            "edit",
                            "delete",
                            "deletecollection",
                            "annotate",
                            "patch",
                            "label",
                        ],
                    },
                ],
            },
        )
        # The Role manifest must be applied after the namespace exists.
        emrrole.node.add_dependency(namespace)
        # Bind cluster role to user — grants the "emr-containers" user the Role above.
        emrrolebind = eks_cluster.add_manifest(
            "emrrolebind",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "RoleBinding",
                "metadata": {"name": "emr-containers", "namespace": self.emr_namespace},
                "subjects": [
                    {
                        "kind": "User",
                        "name": "emr-containers",
                        "apiGroup": "rbac.authorization.k8s.io",
                    }
                ],
                "roleRef": {
                    "kind": "Role",
                    "name": "emr-containers",
                    "apiGroup": "rbac.authorization.k8s.io",
                },
            },
        )
        emrrolebind.node.add_dependency(emrrole)
        # Job execution role
        # Ref: https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/creating-job-execution-role.html
        self.job_role = iam.Role(
            self,
            f"{dep_mod}-EMR_EKS_Job_Role",
            assumed_by=iam.ServicePrincipal("elasticmapreduce.amazonaws.com"),
        )
        # NOTE(review): S3 access is granted on all buckets ("*") — consider
        # scoping to the module's buckets; flagged by the IAM5 suppression below.
        self.job_role.add_to_policy(
            iam.PolicyStatement(
                resources=["*"],
                actions=["s3:PutObject", "s3:GetObject", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
            )
        )
        self.job_role.add_to_policy(
            iam.PolicyStatement(
                resources=[f"arn:{self.partition}:logs:*:*:*"],
                actions=[
                    "logs:PutLogEvents",
                    "logs:CreateLogStream",
                    "logs:DescribeLogGroups",
                    "logs:DescribeLogStreams",
                ],
                effect=iam.Effect.ALLOW,
            )
        )
        # Modify trust policy — IRSA: allow the EMR-managed service accounts in
        # the "emr" namespace to assume this role via the cluster's OIDC provider.
        string_like = CfnJson(
            self,
            "ConditionJson",
            value={f"{eks_openid_issuer}:sub": f"system:serviceaccount:emr:emr-containers-sa-*-*-{self.account}-*"},
        )
        self.job_role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["sts:AssumeRoleWithWebIdentity"],
                principals=[
                    iam.OpenIdConnectPrincipal(
                        eks_cluster.open_id_connect_provider,
                        conditions={"StringLike": string_like},
                    )
                ],
            )
        )
        # NOTE(review): "sts.amazon.com" looks like a typo for "sts.amazonaws.com"
        # (the standard IRSA audience); as written this aud condition never
        # matches. Confirm before changing — the sub-based statement above is
        # what currently grants access.
        string_aud = CfnJson(
            self,
            "ConditionJsonAud",
            value={f"{eks_openid_issuer}:aud": "sts.amazon.com"},
        )
        self.job_role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["sts:AssumeRoleWithWebIdentity"],
                principals=[
                    iam.OpenIdConnectPrincipal(
                        eks_cluster.open_id_connect_provider,
                        conditions={"StringEquals": string_aud},
                    )
                ],
            )
        )
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                {
                    "id": "AwsSolutions-IAM4",
                    "reason": "Managed Policies are for service account roles only",
                },
                {
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to ADDF resources",
                },
                {
                    "id": "AwsSolutions-L1",
                    "reason": "Not creating the Lambda directly",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/app.py | modules/beta/emrstudio-on-eks/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
import cdk_nag
from rbac_stack import EmrEksRbacStack # type: ignore[attr-defined]
from studio_stack import StudioLiveStack # type: ignore[attr-defined]
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
    """Return the SeedFarmer environment-variable key for parameter *name*."""
    return "SEEDFARMER_PARAMETER_" + name
# Required networking parameters. Validate *before* parsing so a missing
# PRIVATE_SUBNET_IDS surfaces as a clear ValueError instead of
# json.loads(None) raising an opaque TypeError.
vpc_id = os.getenv(_param("VPC_ID"))  # required
if not vpc_id:
    raise ValueError("missing input parameter vpc-id")

_raw_private_subnet_ids = os.getenv(_param("PRIVATE_SUBNET_IDS"))  # required
if not _raw_private_subnet_ids:
    raise ValueError("missing input parameter private-subnet-ids")
# JSON-encoded list of subnet ids, e.g. '["subnet-123", "subnet-456"]'.
private_subnet_ids = json.loads(_raw_private_subnet_ids)
if not private_subnet_ids:
    raise ValueError("missing input parameter private-subnet-ids")

# Remaining required inputs (defaulted to "" / None; consumed by the stacks below).
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")  # required
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")  # required
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")  # required
eks_openid_issuer = os.getenv(_param("EKS_OPENID_ISSUER"), "")  # required
artifact_bucket_name = os.getenv(_param("ARTIFACT_BUCKET_NAME"))  # required
sso_username = os.getenv(_param("SSO_USERNAME"))  # required
# Optional: Kubernetes namespace used by EMR on EKS.
emr_eks_namespace = os.getenv(_param("EMR_EKS_NAMESPACE"), "emr-studio")
app = aws_cdk.App()

# RBAC stack first: it creates the EMR job execution role whose ARN the
# Studio stack consumes below.
eks_stack = EmrEksRbacStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}-rbac",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    eks_cluster_name=eks_cluster_name,
    eks_admin_role_arn=eks_admin_role_arn,
    eks_oidc_arn=eks_oidc_arn,
    eks_openid_issuer=eks_openid_issuer,
    emr_namespace=emr_eks_namespace,
)

# EMR Studio stack: wires the Studio to the EKS-backed execution role above.
emr_studio = StudioLiveStack(
    app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    vpc_id=vpc_id,
    private_subnet_ids=private_subnet_ids,
    artifact_bucket_name=artifact_bucket_name,
    eks_cluster_name=eks_cluster_name,
    execution_role_arn=eks_stack.job_role.role_arn,
    emr_namespace=emr_eks_namespace,
    sso_username=sso_username,
)

# Run cdk-nag checks on every construct; log_ignores surfaces suppressed findings.
aws_cdk.Aspects.of(app).add(cdk_nag.AwsSolutionsChecks(log_ignores=True))

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/cleanup.py | modules/beta/emrstudio-on-eks/cleanup.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# type: ignore
import sys
import time
import boto3
# boto3 clients: "emr" for Studios, "emr-containers" for EMR-on-EKS virtual clusters.
emr_client = boto3.client("emr")
emrc_client = boto3.client("emr-containers")
# Pagination StartingToken; never advanced — a single paginate() pass is used.
marker = None
# Project prefix used when matching resource names for this deployment/module.
prefix = "addf"
# Positional CLI args: cleanup.py <deployment_name> <module_name>
deployment_name = sys.argv[1]
module_name = sys.argv[2]
def list_studios():
    """Return a paginated iterator over existing EMR Studios (capped at 10 items)."""
    pages = emr_client.get_paginator("list_studios")
    return pages.paginate(PaginationConfig={"MaxItems": 10, "StartingToken": marker})
def delete_studio(studios_list_iterator):
    """Delete every EMR Studio whose name matches this deployment/module.

    Bug fix: the original attached an ``else`` to the inner ``for`` loop.
    With no ``break`` in the loop, that ``else`` ran after *every* page,
    printing "Currently there are no Studios detected" even when studios had
    just been deleted. A match flag now controls the message.

    Raises:
        Any exception from ``delete_studio`` (e.g. the Studio still contains
        Workspaces) is logged and re-raised.
    """
    found_any = False
    for page in studios_list_iterator:
        for studio in page["Studios"]:
            if studio["Name"].startswith(f"{prefix}-{deployment_name}-{module_name[0:14]}"):
                found_any = True
                try:
                    emr_client.delete_studio(StudioId=studio["StudioId"])
                    print(f'Deleted the Studio: {studio["StudioId"]}')
                except Exception as ex:
                    print(f'Studio: {studio["StudioId"]} still contains Workspaces. Please delete them')
                    raise ex
    if not found_any:
        print("Currently there are no Studios detected")
def list_virtual_clusters():
    """Return the id of the matching RUNNING virtual cluster, or None.

    If several clusters match the deployment/module prefix, the last one
    returned by the API wins (original behavior preserved).
    """
    match_prefix = f"{prefix}-{deployment_name}-{module_name[0:14]}"
    result = None
    listing = emrc_client.list_virtual_clusters(containerProviderType="EKS", states=["RUNNING"])
    for cluster in listing["virtualClusters"]:
        if cluster["name"].startswith(match_prefix):
            result = cluster["id"]
    return result
def delete_managed_endpoints(vc_id):
    """Delete every ACTIVE/TERMINATING managed endpoint of the given virtual cluster."""
    endpoints = emrc_client.list_managed_endpoints(virtualClusterId=vc_id, states=["ACTIVE", "TERMINATING"])["endpoints"]
    print(endpoints)
    # The API already filters by virtual cluster; the guard is kept as a belt-and-braces check.
    for endpoint in (ep for ep in endpoints if ep["virtualClusterId"] == vc_id):
        emrc_client.delete_managed_endpoint(id=endpoint["id"], virtualClusterId=vc_id)
        print(f'Deleted the Managed Endpoint: {endpoint["id"]}')
def delete_virtual_cluster(vc_id):
    """
    Deletes Virtual Cluster
    """
    # vc_id is an EMR-on-EKS virtual cluster id, as returned by list_virtual_clusters().
    emrc_client.delete_virtual_cluster(id=vc_id)
    print(f"Deleted the VirtualCluster: {vc_id}")
if __name__ == "__main__":
    # Tear down order: Studios first, then the module's virtual cluster.
    studios_list_iterator = list_studios()
    delete_studio(studios_list_iterator=studios_list_iterator)
    vc_id = list_virtual_clusters()
    if vc_id:
        delete_managed_endpoints(vc_id=vc_id)
        # Managed-endpoint deletion is asynchronous; give it time to finish
        # before deleting the virtual cluster itself.
        time.sleep(120)
        delete_virtual_cluster(vc_id=vc_id)
    else:
        print("Currently there are no Virtual Clusters detected")
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/tests/test_app.py | modules/beta/emrstudio-on-eks/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import sys
from unittest import mock
import pytest
@pytest.fixture(scope="function", autouse=True)
def stack_defaults():
    """Run each test under a clean environment holding the module's required inputs."""
    env = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_VPC_ID": "vpc-123",
        "SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS": json.dumps(["subnet-123", "subnet-456"]),
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME": "test-cluster",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN": "arn:aws:iam::111111111111:role/test-role",
        "SEEDFARMER_PARAMETER_EKS_OIDC_ARN": (
            "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXX"
        ),
        "SEEDFARMER_PARAMETER_EKS_OPENID_ISSUER": "oidc.eks.us-east-1.amazonaws.com/id/XXXXXX",
        "SEEDFARMER_PARAMETER_ARTIFACT_BUCKET_NAME": "test-bucket",
        "SEEDFARMER_PARAMETER_SSO_USERNAME": "sso-username",
    }
    with mock.patch.dict(os.environ, env, clear=True):
        # Drop any previously imported 'app' module so each test re-executes it.
        sys.modules.pop("app", None)
        yield
def test_app(stack_defaults):
    # Importing app executes its module top level, constructing both stacks
    # end-to-end under the fixture's environment.
    import app  # noqa: F401
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/tests/test_stack.py | modules/beta/emrstudio-on-eks/tests/test_stack.py | import aws_cdk as cdk
import cdk_nag
import pytest
from aws_cdk.assertions import Annotations, Match, Template
@pytest.fixture(scope="function")
def app() -> cdk.App:
    # Fresh CDK App per test so stack ids never collide across tests.
    return cdk.App()
@pytest.fixture(scope="function")
def rbac_stack(app: cdk.App) -> cdk.Stack:
    # Build the RBAC stack with fixed, synthetic identifiers; no AWS calls.
    from rbac_stack import EmrEksRbacStack

    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    app_prefix = f"{project_name}-{dep_name}-{mod_name}-rbac"

    return EmrEksRbacStack(
        scope=app,
        id=app_prefix,
        project=project_name,
        deployment=dep_name,
        module=mod_name,
        env=cdk.Environment(
            account="111111111111",
            region="us-east-1",
        ),
        eks_cluster_name="test-cluster",
        eks_admin_role_arn="arn:aws:iam::111111111111:role/test-role",
        eks_oidc_arn="arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXX",
        eks_openid_issuer="oidc.eks.us-east-1.amazonaws.com/id/XXXXXX",
        emr_namespace="emr-studio",
    )
@pytest.fixture(scope="function")
def studio_stack(app: cdk.App) -> cdk.Stack:
    # Build the Studio stack with fixed, synthetic identifiers; no AWS calls.
    from studio_stack import StudioLiveStack

    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"
    app_prefix = f"{project_name}-{dep_name}-{mod_name}"

    return StudioLiveStack(
        scope=app,
        id=app_prefix,
        project=project_name,
        deployment=dep_name,
        module=mod_name,
        env=cdk.Environment(
            account="111111111111",
            region="us-east-1",
        ),
        vpc_id="vpc-123",
        private_subnet_ids=["subnet12", "subnet34"],
        artifact_bucket_name="test-bucket",
        eks_cluster_name="test-cluster",
        execution_role_arn="arn:aws:iam::111111111111:role/test-role",
        emr_namespace="test-namespace",
        sso_username="test-user",
    )
def test_synthesize_stack(rbac_stack: cdk.Stack, studio_stack: cdk.Stack) -> None:
    """Both stacks must synthesize to a CloudFormation template without raising."""
    for stack in (rbac_stack, studio_stack):
        Template.from_stack(stack)
def test_no_cdk_nag_errors(rbac_stack: cdk.Stack, studio_stack: cdk.Stack) -> None:
    """Neither stack may emit AwsSolutions-* error annotations from cdk-nag."""
    pattern = Match.string_like_regexp(r"AwsSolutions-.*")
    nag_errors = []
    for stack in (rbac_stack, studio_stack):
        cdk.Aspects.of(stack).add(cdk_nag.AwsSolutionsChecks())
        nag_errors.extend(Annotations.from_stack(stack).find_error("*", pattern))
    assert not nag_errors, f"Found {len(nag_errors)} CDK nag errors"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/beta/emrstudio-on-eks/tests/__init__.py | modules/beta/emrstudio-on-eks/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/stack.py | modules/analysis/rosbag-image-pipeline-sfn/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
from typing import Any, List, cast
import aws_cdk as cdk
from aws_cdk import aws_batch as batch
from aws_cdk import aws_dynamodb as dynamodb
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda as aws_lambda
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_s3_deployment as s3deploy
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as tasks
from constructs import Construct, IConstruct
class TemplateStack(cdk.Stack):
def __init__(
    self,
    scope: Construct,
    id: str,
    project_name: str,
    deployment_name: str,
    module_name: str,
    hash: str,
    stack_description: str,
    vpc_id: str,
    private_subnet_ids: List[str],
    emr_job_exec_role_arn: str,
    emr_app_id: str,
    source_bucket_name: str,
    target_bucket_name: str,
    artifacts_bucket_name: str,
    logs_bucket_name: str,
    detection_ddb_name: str,
    on_demand_job_queue_arn: str,
    fargate_job_queue_arn: str,
    parquet_batch_job_def_arn: str,
    png_batch_job_def_arn: str,
    object_detection_image_uri: str,
    object_detection_role_arn: str,
    object_detection_job_concurrency: int,
    object_detection_instance_type: str,
    lane_detection_image_uri: str,
    lane_detection_role_arn: str,
    lane_detection_job_concurrency: int,
    lane_detection_instance_type: str,
    file_suffix: str,
    desired_encoding: str,
    yolo_model: str,
    image_topics: List[str],
    sensor_topics: List[str],
    **kwargs: Any,
) -> None:
    """Build the rosbag image-processing Step Functions pipeline.

    Pipeline shape: create batch (Lambda) -> parallel Batch extraction
    (PNG images + Parquet sensor data) -> query image dirs (DynamoDB) ->
    parallel SageMaker labelling (object + lane detection Maps) -> EMR chain.

    Note: the ``hash`` parameter shadows the builtin of the same name; it is
    only used to suffix the state machine name.
    """
    super().__init__(scope, id, description=stack_description, **kwargs)

    self.project_name = project_name
    self.deployment_name = deployment_name
    self.module_name = module_name

    dep_mod = f"{self.project_name}-{self.deployment_name}-{self.module_name}"
    # Truncate so names derived from dep_mod stay within AWS length limits.
    dep_mod = dep_mod[:64]
    cdk.Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=dep_mod)

    # Sagemaker Security Group — self-referencing ingress so processing jobs
    # in the same SG can talk to each other.
    vpc = ec2.Vpc.from_lookup(
        self,
        "VPC",
        vpc_id=vpc_id,
    )
    security_group = ec2.SecurityGroup(
        self,
        "Sagemaker Jobs SG",
        vpc=vpc,
        allow_all_outbound=True,
        description="Sagemaker Processing Jobs SG",
    )
    security_group.add_ingress_rule(peer=security_group, connection=ec2.Port.all_traffic())

    # DynamoDB Tracking Table — one item per drive/file part within a batch.
    tracking_partition_key = "pk"  # batch_id or drive_id
    tracking_sort_key = "sk"  # batch_id / array_index_id or drive_id / file_part
    tracking_table = dynamodb.Table(
        self,
        "Drive Tracking Table",
        table_name=f"{dep_mod}-drive-tracking",
        partition_key=dynamodb.Attribute(name=tracking_partition_key, type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name=tracking_sort_key, type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        removal_policy=cdk.RemovalPolicy.DESTROY,
        point_in_time_recovery=True,
        stream=dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
    )

    # DynamoDB Detection Table (pre-existing; referenced, not created here).
    detection_ddb_table = dynamodb.Table.from_table_name(self, "Detection DDB Table", detection_ddb_name)

    # Batch definitions (pre-existing queues referenced by ARN).
    on_demand_job_queue = batch.JobQueue.from_job_queue_arn(self, "On Demand Job Queue", on_demand_job_queue_arn)
    fargate_job_queue = batch.JobQueue.from_job_queue_arn(self, "Fargate Job Queue", fargate_job_queue_arn)

    # S3 buckets (pre-existing; referenced by name).
    source_bucket = s3.Bucket.from_bucket_name(self, "Source Bucket", source_bucket_name)
    target_bucket = s3.Bucket.from_bucket_name(self, "Target Bucket", target_bucket_name)
    artifacts_bucket = s3.Bucket.from_bucket_name(self, "Artifacts Bucket", artifacts_bucket_name)
    logs_bucket = s3.Bucket.from_bucket_name(self, "Logs Bucket", logs_bucket_name)

    # Define Lambda job for creating a batch of drives
    create_batch_lambda_function = aws_lambda.Function(
        self,
        "CreateBatchOfDrivesFunction",
        code=aws_lambda.Code.from_asset("lambda/create-batch-of-drives/src"),
        handler="lambda_function.lambda_handler",
        runtime=aws_lambda.Runtime.PYTHON_3_10,
        environment={
            "DYNAMODB_TABLE": tracking_table.table_name,
            "FILE_SUFFIX": file_suffix,
        },
        timeout=cdk.Duration.minutes(15),
    )
    tracking_table.grant_read_write_data(create_batch_lambda_function)
    source_bucket.grant_read(create_batch_lambda_function)

    # Define step function
    # The execution name doubles as the batch id throughout the pipeline.
    sfn_batch_id = sfn.JsonPath.string_at("$$.Execution.Name")

    create_batch_task = tasks.LambdaInvoke(
        self,
        "Create Batch of Drives",
        lambda_function=create_batch_lambda_function,
        payload=sfn.TaskInput.from_object(
            {
                "DrivesToProcess": sfn.JsonPath.string_at("$.DrivesToProcess"),
                "ExecutionID": sfn_batch_id,
            }
        ),
        result_path="$.LambdaOutput",
        result_selector={
            "BatchSize.$": "$.Payload.BatchSize",
        },
    )

    # AWS Batch array job: one array element per drive/file part in the batch.
    image_extraction_step_machine_task = tasks.BatchSubmitJob(
        self,
        "Image Extraction",
        job_definition_arn=png_batch_job_def_arn,
        job_name="ros-image-pipeline-png",
        job_queue_arn=on_demand_job_queue.job_queue_arn,
        integration_pattern=sfn.IntegrationPattern.RUN_JOB,
        array_size=sfn.JsonPath.number_at("$.LambdaOutput.BatchSize"),
        container_overrides=tasks.BatchContainerOverrides(
            environment={
                "TABLE_NAME": tracking_table.table_name,
                "BATCH_ID": sfn_batch_id,
                "DEBUG": "true",
                "IMAGE_TOPICS": json.dumps(image_topics),
                "DESIRED_ENCODING": desired_encoding,
                "TARGET_BUCKET": target_bucket.bucket_name,
            },
        ),
        result_path=sfn.JsonPath.DISCARD,
    )

    parquet_extraction_step_machine_task = tasks.BatchSubmitJob(
        self,
        "Parquet Extraction",
        job_definition_arn=parquet_batch_job_def_arn,
        job_name="ros-image-pipeline-parquet",
        job_queue_arn=fargate_job_queue.job_queue_arn,
        integration_pattern=sfn.IntegrationPattern.RUN_JOB,
        array_size=sfn.JsonPath.number_at("$.LambdaOutput.BatchSize"),
        container_overrides=tasks.BatchContainerOverrides(
            environment={
                "TABLE_NAME": tracking_table.table_name,
                "BATCH_ID": sfn_batch_id,
                "TOPICS": json.dumps(sensor_topics),
                "TARGET_BUCKET": target_bucket.bucket_name,
            },
        ),
        result_path=sfn.JsonPath.DISCARD,
    )

    # Query the tracking table for the S3 prefixes of extracted images,
    # flattening every item's resized_image_dirs list into one array.
    get_image_dirs_task = tasks.CallAwsService(
        self,
        "Get Image Directories",
        service="dynamodb",
        action="query",
        iam_resources=[tracking_table.table_arn],
        parameters={
            "TableName": tracking_table.table_name,
            "KeyConditionExpression": "pk = :pk",
            "ExpressionAttributeValues": {
                ":pk": {"S": sfn_batch_id},
            },
            "ProjectionExpression": "resized_image_dirs",
        },
        result_path="$.ImageDirs",
        result_selector={
            "S3Paths.$": "$.Items[*].resized_image_dirs.L[*].S",
        },
    )

    # SageMaker processing job for YOLO object detection over one image dir
    # ("$" is the current Map item, an S3 prefix in the target bucket).
    object_detection_task = tasks.CallAwsService(
        self,
        "Object Detection",
        service="sagemaker",
        action="createProcessingJob",
        iam_resources=["*"],
        # integration_pattern=sfn.IntegrationPattern.RUN_JOB, # not supported in CDK (as of 2024-02-08)
        additional_iam_statements=[
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[object_detection_role_arn],
            ),
        ],
        parameters={
            "RoleArn": object_detection_role_arn,
            "ProcessingJobName": sfn.JsonPath.format(
                "Step-{}-YOLO",
                sfn.JsonPath.uuid(),
            ),
            "AppSpecification": {
                "ImageUri": object_detection_image_uri,
                "ContainerArguments": [
                    "--model",
                    yolo_model,
                ],
            },
            "NetworkConfig": {
                "VpcConfig": {
                    "SecurityGroupIds": [security_group.security_group_id],
                    "Subnets": private_subnet_ids,
                }
            },
            "ProcessingResources": {
                "ClusterConfig": {
                    "InstanceCount": 1,
                    "InstanceType": object_detection_instance_type,
                    "VolumeSizeInGB": 30,
                }
            },
            "ProcessingInputs": [
                {
                    "InputName": "data",
                    "S3Input": {
                        "S3Uri": sfn.JsonPath.format(
                            f"s3://{target_bucket.bucket_name}/{{}}/",
                            sfn.JsonPath.string_at("$"),
                        ),
                        "S3DataType": "S3Prefix",
                        "S3InputMode": "File",
                        "S3DataDistributionType": "FullyReplicated",
                        "S3CompressionType": "None",
                        "LocalPath": "/opt/ml/processing/input/",
                    },
                },
            ],
            "ProcessingOutputConfig": {
                "Outputs": [
                    {
                        "OutputName": "output",
                        "S3Output": {
                            "S3Uri": sfn.JsonPath.format(
                                f"s3://{target_bucket.bucket_name}/{{}}_post_obj_dets/",
                                sfn.JsonPath.string_at("$"),
                            ),
                            "S3UploadMode": "EndOfJob",
                            "LocalPath": "/opt/ml/processing/output/",
                        },
                    }
                ]
            },
        },
    )

    # Container-local paths shared between the lane-detection arguments and
    # its ProcessingInputs/Outputs below.
    sm_local_input = "/opt/ml/processing/input/image"
    sm_local_output = "/opt/ml/processing/output/image"
    sm_local_output_json = "/opt/ml/processing/output/json"
    sm_local_output_csv = "/opt/ml/processing/output/csv"

    # SageMaker processing job for lane detection (image, JSON and CSV outputs).
    lane_detection_task = tasks.CallAwsService(
        self,
        "Lane Detection",
        service="sagemaker",
        action="createProcessingJob",
        iam_resources=["*"],
        # integration_pattern=sfn.IntegrationPattern.RUN_JOB, # not supported in CDK (as of 2024-02-08)
        additional_iam_statements=[
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[lane_detection_role_arn],
            ),
        ],
        parameters={
            "RoleArn": lane_detection_role_arn,
            "ProcessingJobName": sfn.JsonPath.format(
                "Step-{}-LANE",
                sfn.JsonPath.uuid(),
            ),
            "AppSpecification": {
                "ImageUri": lane_detection_image_uri,
                "ContainerArguments": [
                    "--save_dir",
                    sm_local_output,
                    "--source",
                    sm_local_input,
                    "--json_path",
                    sm_local_output_json,
                    "--csv_path",
                    sm_local_output_csv,
                ],
            },
            "NetworkConfig": {
                "VpcConfig": {
                    "SecurityGroupIds": [security_group.security_group_id],
                    "Subnets": private_subnet_ids,
                }
            },
            "ProcessingResources": {
                "ClusterConfig": {
                    "InstanceCount": 1,
                    "InstanceType": lane_detection_instance_type,
                    "VolumeSizeInGB": 30,
                }
            },
            "ProcessingInputs": [
                {
                    "InputName": "data",
                    "S3Input": {
                        "S3Uri": sfn.JsonPath.format(
                            f"s3://{target_bucket.bucket_name}/{{}}/",
                            sfn.JsonPath.string_at("$"),
                        ),
                        "S3DataType": "S3Prefix",
                        "S3InputMode": "File",
                        "S3DataDistributionType": "FullyReplicated",
                        "S3CompressionType": "None",
                        "LocalPath": sm_local_input,
                    },
                },
            ],
            "ProcessingOutputConfig": {
                "Outputs": [
                    {
                        "OutputName": "image_output",
                        "S3Output": {
                            "S3Uri": sfn.JsonPath.format(
                                f"s3://{target_bucket.bucket_name}/{{}}_post_lane_dets/",
                                sfn.JsonPath.string_at("$"),
                            ),
                            "S3UploadMode": "EndOfJob",
                            "LocalPath": sm_local_output,
                        },
                    },
                    {
                        "OutputName": "json_output",
                        "S3Output": {
                            "S3Uri": sfn.JsonPath.format(
                                f"s3://{target_bucket.bucket_name}/{{}}_post_lane_dets/",
                                sfn.JsonPath.string_at("$"),
                            ),
                            "S3UploadMode": "EndOfJob",
                            "LocalPath": sm_local_output_json,
                        },
                    },
                    {
                        "OutputName": "csv_output",
                        "S3Output": {
                            "S3Uri": sfn.JsonPath.format(
                                f"s3://{target_bucket.bucket_name}/{{}}_post_lane_dets/",
                                sfn.JsonPath.string_at("$"),
                            ),
                            "S3UploadMode": "EndOfJob",
                            "LocalPath": sm_local_output_csv,
                        },
                    },
                ]
            },
        },
    )

    # Fan each processing job out over the image directories; the wait/poll
    # wrapper compensates for the missing RUN_JOB integration (see above).
    obj_detection_map_task = sfn.Map(
        self,
        "Object Detection Parallel Map",
        items_path="$.ImageDirs.S3Paths",
        max_concurrency=object_detection_job_concurrency,
    ).item_processor(
        self.processing_job_add_wait(
            id=object_detection_task.id,
            start_process_task=object_detection_task,
            wait_time_seconds=15,
        )
    )

    lane_detection_map_task = sfn.Map(
        self,
        "Lane Detection Parallel Map",
        items_path="$.ImageDirs.S3Paths",
        max_concurrency=lane_detection_job_concurrency,
    ).item_processor(
        self.processing_job_add_wait(
            id=lane_detection_task.id,
            start_process_task=lane_detection_task,
            wait_time_seconds=15,
        ),
    )

    emr_task_chain = self.create_emr_task_chain(
        emr_app_id=emr_app_id,
        emr_job_exec_role_arn=emr_job_exec_role_arn,
        target_bucket=target_bucket,
        artifacts_bucket=artifacts_bucket,
        logs_bucket=logs_bucket,
        image_topics=image_topics,
        tracking_table=tracking_table,
        detection_ddb_table=detection_ddb_table,
        batch_id=sfn_batch_id,
    )

    # Define state machine:
    # batch -> (image || parquet extraction) -> image dirs -> (obj || lane maps) -> EMR.
    definition = (
        create_batch_task.next(
            sfn.Parallel(self, "Sensor Extraction", result_path=sfn.JsonPath.DISCARD)
            .branch(image_extraction_step_machine_task)
            .branch(parquet_extraction_step_machine_task)
        )
        .next(get_image_dirs_task)
        .next(
            sfn.Parallel(self, "Image Labelling", result_path=sfn.JsonPath.DISCARD)
            .branch(obj_detection_map_task)
            .branch(lane_detection_map_task)
        )
        .next(emr_task_chain)
    )

    self.state_machine = sfn.StateMachine(
        self,
        "StateMachine",
        state_machine_name=f"{project_name}-{deployment_name}-rosbag-image-pipeline-{hash}",
        definition_body=sfn.DefinitionBody.from_chainable(definition),
    )
def processing_job_add_wait(
    self,
    id: str,
    start_process_task: tasks.CallAwsService,
    wait_time_seconds: int,
) -> sfn.IChainable:
    """Chain a SageMaker processing-job start task with a poll-until-done loop.

    After the job is submitted, the chain repeatedly describes the job every
    ``wait_time_seconds`` seconds until it reaches a terminal status, ending
    in a Succeed state (Completed) or a Fail state (Failed/Stopped).

    :param id: unique suffix used in the construct IDs of the generated states.
    :param start_process_task: task that issues the CreateProcessingJob call.
    :param wait_time_seconds: polling interval between status checks.
    :return: chainable fragment beginning at ``start_process_task``.
    """
    # Retry the submission itself on SageMaker service errors (e.g. throttling).
    start_process_task.add_retry(
        errors=["SageMaker.SageMakerException"],
        interval=cdk.Duration.seconds(5),
        max_attempts=5,
        jitter_strategy=sfn.JitterType.FULL,
    )

    # DescribeProcessingJob needs the job *name*, which is the second
    # "/"-separated component of the ProcessingJobArn returned by the start task.
    job_name = sfn.JsonPath.array_get_item(
        sfn.JsonPath.string_split(sfn.JsonPath.string_at("$.ProcessingJobArn"), "/"),
        1,
    )
    describe_job = tasks.CallAwsService(
        self,
        f"Get {id} Status",
        service="sagemaker",
        action="describeProcessingJob",
        iam_resources=["*"],
        parameters={"ProcessingJobName": job_name},
        result_path="$.ProcessingJobStatus",
    )

    # Loop edge: pause, then describe the job again.
    poll_again = sfn.Wait(
        self,
        f"Wait {id}",
        time=sfn.WaitTime.duration(cdk.Duration.seconds(wait_time_seconds)),
    ).next(describe_job)

    job_done = sfn.Succeed(self, f"{id} Succeeded")
    job_failed = sfn.Fail(self, f"{id} Failed")

    status_path = "$.ProcessingJobStatus.ProcessingJobStatus"
    finished_choice = sfn.Choice(self, f"Did {id} finish?")
    finished_choice.when(
        sfn.Condition.string_equals(status_path, "Completed"),
        job_done,
    )
    finished_choice.when(
        sfn.Condition.or_(
            sfn.Condition.string_equals(status_path, "Failed"),
            sfn.Condition.string_equals(status_path, "Stopped"),
        ),
        job_failed,
    )
    finished_choice.otherwise(poll_again)

    return start_process_task.next(describe_job).next(finished_choice)
def create_emr_task_chain(
    self,
    emr_app_id: str,
    emr_job_exec_role_arn: str,
    artifacts_bucket: s3.IBucket,
    target_bucket: s3.IBucket,
    logs_bucket: s3.IBucket,
    tracking_table: dynamodb.Table,
    detection_ddb_table: dynamodb.ITable,
    image_topics: List[str],
    batch_id: str,
) -> sfn.IChainable:
    """Build the EMR Serverless scene-detection job chain.

    Deploys the PySpark scripts to the artifacts bucket, starts an EMR
    Serverless job run, then polls GetJobRun every 15 seconds until the run
    reaches a terminal state (SUCCESS -> Succeed, FAILED/CANCELLED -> Fail).

    :param emr_app_id: EMR Serverless application id to run the job on.
    :param emr_job_exec_role_arn: execution role passed to the job run.
    :param artifacts_bucket: bucket that receives the deployed EMR scripts.
    :param target_bucket: bucket the Spark job writes its output to.
    :param logs_bucket: bucket for EMR Serverless S3 monitoring logs.
    :param tracking_table: DynamoDB table holding batch metadata.
    :param detection_ddb_table: DynamoDB table the job writes detections to.
    :param image_topics: ROS image topics forwarded to the Spark job.
    :param batch_id: JsonPath expression identifying the current batch.
    :return: chainable fragment beginning at the StartJobRun task.
    """
    emr_app_arn = f"arn:{self.partition}:emr-serverless:{self.region}:{self.account}:/applications/{emr_app_id}"

    # Ship the Spark entrypoint and its jar dependency to the artifacts bucket.
    # NOTE(review): the same constant is used both as the *local* asset path and
    # the S3 destination prefix — confirm a local "emr-scripts/" directory
    # exists relative to the CDK app.
    s3_emr_job_prefix = "emr-scripts/"
    s3deploy.BucketDeployment(
        self,
        "S3BucketDagDeploymentTestJob",
        sources=[s3deploy.Source.asset(s3_emr_job_prefix)],
        destination_bucket=artifacts_bucket,
        destination_key_prefix=s3_emr_job_prefix,
    )
    s3_script_dir = artifacts_bucket.s3_url_for_object(s3_emr_job_prefix)

    run_job_task = tasks.CallAwsService(
        self,
        "Start Scene Detection Job",
        service="emrserverless",
        action="startJobRun",
        iam_resources=[emr_app_arn],
        iam_action="emr-serverless:startJobRun",
        # startJobRun requires permission to pass the job execution role.
        additional_iam_statements=[
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[emr_job_exec_role_arn],
            ),
        ],
        parameters={
            "ApplicationId": emr_app_id,
            "ExecutionRoleArn": emr_job_exec_role_arn,
            # Fresh idempotency token for every state-machine execution.
            "ClientToken": sfn.JsonPath.uuid(),
            # Fix: job name previously misspelled "step-functons".
            "Name": sfn.JsonPath.format("scene-detection-step-functions-{}", batch_id),
            "JobDriver": {
                "SparkSubmit": {
                    "EntryPoint": f"{s3_script_dir}detect_scenes.py",
                    "SparkSubmitParameters": f"--jars {s3_script_dir}spark-dynamodb_2.12-1.1.1.jar",
                    "EntryPointArguments": sfn.JsonPath.array(
                        "--batch-metadata-table-name",
                        tracking_table.table_name,
                        "--batch-id",
                        batch_id,
                        "--output-bucket",
                        target_bucket.bucket_name,
                        "--region",
                        self.region,
                        "--output-dynamo-table",
                        detection_ddb_table.table_name,
                        "--image-topics",
                        json.dumps(image_topics),
                    ),
                },
            },
            "ConfigurationOverrides": {
                "MonitoringConfiguration": {
                    "ManagedPersistenceMonitoringConfiguration": {
                        "Enabled": True,
                    },
                    "S3MonitoringConfiguration": {
                        "LogUri": logs_bucket.s3_url_for_object("scene-detection"),
                    },
                },
            },
        },
    )

    # Poll the run; ApplicationId/JobRunId come from startJobRun's output.
    get_job_status_task = tasks.CallAwsService(
        self,
        "Get Scene Detection Job Status",
        service="emrserverless",
        action="getJobRun",
        iam_resources=[f"{emr_app_arn}/jobruns/*"],
        iam_action="emr-serverless:getJobRun",
        result_path="$.JobStatus",
        parameters={
            "ApplicationId": sfn.JsonPath.string_at("$.ApplicationId"),
            "JobRunId": sfn.JsonPath.string_at("$.JobRunId"),
        },
    )
    wait_task = sfn.Wait(
        self,
        "Wait for Scene Detection",
        time=sfn.WaitTime.duration(cdk.Duration.seconds(15)),
    )
    retry_chain = wait_task.next(get_job_status_task)
    success_state = sfn.Succeed(self, "Scene Detection Succeeded")
    fail_state = sfn.Fail(self, "Scene Detection Failed")
    job_status_choice = (
        sfn.Choice(self, "Did Scene Detection Finish?")
        .when(
            sfn.Condition.string_equals("$.JobStatus.JobRun.State", "SUCCESS"),
            success_state,
        )
        .when(
            sfn.Condition.or_(
                sfn.Condition.string_equals("$.JobStatus.JobRun.State", "FAILED"),
                sfn.Condition.string_equals("$.JobStatus.JobRun.State", "CANCELLED"),
            ),
            fail_state,
        )
        .otherwise(retry_chain)
    )
    return run_job_task.next(get_job_status_task).next(job_status_choice)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/app.py | modules/analysis/rosbag-image-pipeline-sfn/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
from typing import List, Optional, cast
from aws_cdk import App, CfnOutput, Environment
from stack import TemplateStack
# Project specific
# SeedFarmer injects these identifiers into the build environment. `hash`
# intentionally shadows the builtin: it is part of this module's public,
# module-level API (passed through to the stack below).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME")
module_name = os.getenv("SEEDFARMER_MODULE_NAME")
hash = os.getenv("SEEDFARMER_HASH", "")

# The combined prefix is embedded in resource names with strict length limits.
# Fix: the message previously said "greater than 35" while the guard only
# rejects lengths greater than 36.
if len(f"{project_name}-{deployment_name}") > 36:
    raise ValueError("This module cannot support a project+deployment name character length greater than 36")
def get_arg_value(name: str, default: Optional[str] = None) -> str:
    """Read a SeedFarmer module parameter from the environment.

    The parameter ``name`` is looked up as ``SEEDFARMER_PARAMETER_<name>``.

    :param name: upper-snake-case parameter name (without the env prefix).
    :param default: value used when the environment variable is unset.
    :return: the resolved, non-empty parameter value.
    :raises ValueError: when the parameter resolves to the empty string.
    """
    # Fix: the previous `if default` truthiness test conflated a falsy-but-set
    # default with "no default"; compare against None explicitly instead.
    value = os.getenv(f"SEEDFARMER_PARAMETER_{name}", default if default is not None else "")
    if value == "":
        raise ValueError(f"required argument {name.replace('_', '-').lower()} is missing")
    return value
# --- Required module parameters (ValueError if missing) ---
# Networking and EMR Serverless wiring.
vpc_id = get_arg_value("VPC_ID")
private_subnet_ids = json.loads(get_arg_value("PRIVATE_SUBNET_IDS"))
emr_job_exec_role_arn = get_arg_value("EMR_JOB_EXEC_ROLE")
emr_app_id = get_arg_value("EMR_APP_ID")
# S3 buckets used across the pipeline.
source_bucket_name = get_arg_value("SOURCE_BUCKET")
target_bucket_name = get_arg_value("INTERMEDIATE_BUCKET")
artifacts_bucket_name = get_arg_value("ARTIFACTS_BUCKET_NAME")
logs_bucket_name = get_arg_value("LOGS_BUCKET_NAME")
# DynamoDB and AWS Batch resources provisioned by sibling modules.
detection_ddb_name = get_arg_value("ROSBAG_SCENE_METADATA_TABLE")
on_demand_job_queue_arn = get_arg_value("ON_DEMAND_JOB_QUEUE_ARN")
fargate_job_queue_arn = get_arg_value("FARGATE_JOB_QUEUE_ARN")
parquet_batch_job_def_arn = get_arg_value("PARQUET_BATCH_JOB_DEF_ARN")
png_batch_job_def_arn = get_arg_value("PNG_BATCH_JOB_DEF_ARN")
# SageMaker object-detection settings (concurrency/instance type have defaults).
object_detection_image_uri = get_arg_value("OBJECT_DETECTION_IMAGE_URI")
object_detection_role = get_arg_value("OBJECT_DETECTION_IAM_ROLE")
object_detection_job_concurrency = int(get_arg_value("OBJECT_DETECTION_JOB_CONCURRENCY", "10"))
object_detection_instance_type = get_arg_value("OBJECT_DETECTION_INSTANCE_TYPE", "ml.m5.xlarge")
# SageMaker lane-detection settings.
lane_detection_image_uri = get_arg_value("LANE_DETECTION_IMAGE_URI")
lane_detection_role = get_arg_value("LANE_DETECTION_IAM_ROLE")
lane_detection_job_concurrency = int(get_arg_value("LANE_DETECTION_JOB_CONCURRENCY", "5"))
lane_detection_instance_type = get_arg_value("LANE_DETECTION_INSTANCE_TYPE", "ml.p3.2xlarge")
# Optional extraction tuning parameters.
file_suffix = get_arg_value("FILE_SUFFIX", ".zip")
desired_encoding = get_arg_value("DESIRED_ENCODING", "bgr8")
yolo_model = get_arg_value("YOLO_MODEL", "yolo11s")
# Topic lists arrive JSON-encoded; validate that they decode to lists.
image_topics: List[str] = json.loads(get_arg_value("IMAGE_TOPICS"))
sensor_topics: List[str] = json.loads(get_arg_value("SENSOR_TOPICS"))
if not isinstance(image_topics, list):
    raise ValueError("image_topics must be a list")
if not isinstance(sensor_topics, list):
    raise ValueError("sensor_topics must be a list")
def generate_description() -> str:
    """Return the stack description, preferring SeedFarmer solution metadata.

    Uses "(<id>) <name>. Version <version>" when all three values are set,
    "(<id>) <name>" when only id and name are set, and a static default
    description otherwise.
    """
    solution_id = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    solution_name = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    solution_version = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)

    if solution_id and solution_name:
        if solution_version:
            return f"({solution_id}) {solution_name}. Version {solution_version}"
        return f"({solution_id}) {solution_name}"
    return "My Module Default Description"
app = App()

# Instantiate the pipeline stack; keyword names mirror the module manifest
# parameters parsed above.
template_stack = TemplateStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=cast(str, project_name),
    deployment_name=cast(str, deployment_name),
    module_name=cast(str, module_name),
    hash=hash,
    stack_description=generate_description(),
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    vpc_id=vpc_id,
    private_subnet_ids=private_subnet_ids,
    emr_job_exec_role_arn=emr_job_exec_role_arn,
    emr_app_id=emr_app_id,
    source_bucket_name=source_bucket_name,
    target_bucket_name=target_bucket_name,
    artifacts_bucket_name=artifacts_bucket_name,
    logs_bucket_name=logs_bucket_name,
    detection_ddb_name=detection_ddb_name,
    on_demand_job_queue_arn=on_demand_job_queue_arn,
    fargate_job_queue_arn=fargate_job_queue_arn,
    parquet_batch_job_def_arn=parquet_batch_job_def_arn,
    png_batch_job_def_arn=png_batch_job_def_arn,
    object_detection_image_uri=object_detection_image_uri,
    object_detection_role_arn=object_detection_role,
    object_detection_job_concurrency=object_detection_job_concurrency,
    object_detection_instance_type=object_detection_instance_type,
    lane_detection_image_uri=lane_detection_image_uri,
    lane_detection_role_arn=lane_detection_role,
    lane_detection_job_concurrency=lane_detection_job_concurrency,
    lane_detection_instance_type=lane_detection_instance_type,
    file_suffix=file_suffix,
    desired_encoding=desired_encoding,
    yolo_model=yolo_model,
    image_topics=image_topics,
    sensor_topics=sensor_topics,
)

# Publish the state machine ARN as SeedFarmer module metadata output.
CfnOutput(
    scope=template_stack,
    id="metadata",
    value=template_stack.to_json_string(
        {
            "StateMachineArn": template_stack.state_machine.state_machine_arn,
        }
    ),
)

app.synth()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/analysis/rosbag-image-pipeline-sfn/tests/test_app.py | modules/analysis/rosbag-image-pipeline-sfn/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Populate the environment with a complete set of module parameters.

    Also drops any previously imported ``app`` module so each test re-runs
    its import-time configuration against this fixture's environment.
    """
    env_defaults = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_SENSOR_TOPICS": '["/vehicle/gps/fix,/vehicle/gps/time,/vehicle/gps/vel,/imu_raw"]',
        "SEEDFARMER_PARAMETER_LANE_DETECTION_INSTANCE_TYPE": "ml.m5.2xlarge",
        "SEEDFARMER_PARAMETER_LANE_DETECTION_JOB_CONCURRENCY": "20",
        "SEEDFARMER_PARAMETER_IMAGE_TOPICS": '["/flir_adk/rgb_front_left/image_raw", "/flir_adk/rgb_front_right/image_raw"]',
        "SEEDFARMER_PARAMETER_OBJECT_DETECTION_IMAGE_URI": (
            "1234567891011.dkr.ecr.us-west-2.amazonaws.com/addf-sfn-example-docker-images-object-detection:latest"
        ),
        "SEEDFARMER_PARAMETER_INTERMEDIATE_BUCKET": "addf-sfn-example-intermediate-bucket-foobar",
        "SEEDFARMER_PARAMETER_EMR_JOB_EXEC_ROLE": (
            "arn:aws:iam::1234567891011:role/addf-sfn-example-core-emr-addfsfnexamplecoreemrserv-nDO07m4q6ZUG"
        ),
        "SEEDFARMER_PARAMETER_ROSBAG_SCENE_METADATA_TABLE": (
            "addf-sfn-example-core-metadata-storage-Rosbag-Scene-Metadata"
        ),
        "SEEDFARMER_PARAMETER_LANE_DETECTION_IAM_ROLE": (
            "arn:aws:iam::1234567891011:role/addf-sfn-example-docker-i-addfsfnexampledockerimage-Vc0u6FvNyMOu"
        ),
        "SEEDFARMER_PARAMETER_OBJECT_DETECTION_INSTANCE_TYPE": "ml.m5.xlarge",
        "SEEDFARMER_PARAMETER_PNG_BATCH_JOB_DEF_ARN": (
            "arn:aws:batch:us-west-2:1234567891011:job-definition/addf-sfn-example-docker-images-ros-to-png:1"
        ),
        "SEEDFARMER_PARAMETER_FARGATE_JOB_QUEUE_ARN": (
            "arn:aws:batch:us-west-2:1234567891011:job-queue/addf-sfn-example-core-batch-compute-FargateJobQueue"
        ),
        "SEEDFARMER_PARAMETER_DESIRED_ENCODING": "bgr8",
        "SEEDFARMER_PARAMETER_EMR_APP_ID": "00fgnvtcju9kd50l",
        "SEEDFARMER_PARAMETER_SPOT_JOB_QUEUE_ARN": (
            "arn:aws:batch:us-west-2:1234567891011:job-queue/addf-sfn-example-core-batch-compute-SpotJobQueue"
        ),
        "SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN": (
            "arn:aws:iam::1234567891011:policy/addf-sfn-example-optionals-datalake-buckets-us-west-2-1234567891011-full-access"
        ),
        "SEEDFARMER_PARAMETER_PARQUET_BATCH_JOB_DEF_ARN": (
            "arn:aws:batch:us-west-2:1234567891011:job-definition/addf-sfn-example-docker-images-ros-to-parquet:1"
        ),
        "SEEDFARMER_PARAMETER_LANE_DETECTION_IMAGE_URI": (
            "1234567891011.dkr.ecr.us-west-2.amazonaws.com/addf-sfn-example-docker-images-lane-detection:smprocessor"
        ),
        "SEEDFARMER_PARAMETER_LOGS_BUCKET_NAME": "addf-sfn-example-logs-bucket-foobar",
        "SEEDFARMER_PARAMETER_PRIVATE_SUBNET_IDS": (
            '"subnet-0d0dbcc18be75a515,subnet-0975db0ff8077da32,subnet-04672aec504dd73d3"'
        ),
        "SEEDFARMER_PARAMETER_SOURCE_BUCKET": "addf-sfn-example-raw-bucket-foobar",
        "SEEDFARMER_PARAMETER_VPC_ID": "vpc-0e166bcb8665cad2f",
        "SEEDFARMER_PARAMETER_ON_DEMAND_JOB_QUEUE_ARN": (
            "arn:aws:batch:us-west-2:1234567891011:job-queue/addf-sfn-example-core-batch-compute-OnDemandJobQueue"
        ),
        "SEEDFARMER_PARAMETER_OBJECT_DETECTION_IAM_ROLE": (
            "arn:aws:iam::1234567891011:role/addf-sfn-example-docker-i-addfsfnexampledockerimage-mhcRyBLGyJmc"
        ),
        "SEEDFARMER_PARAMETER_OBJECT_DETECTION_JOB_CONCURRENCY": "30",
        "SEEDFARMER_PARAMETER_ARTIFACTS_BUCKET": "addf-sfn-example-artifacts-bucket-foobar",
        "SEEDFARMER_PARAMETER_ARTIFACTS_BUCKET_NAME": "addf-sfn-example-artifacts-bucket-foobar",
    }
    os.environ.update(env_defaults)

    # Force a fresh import of `app` (its configuration runs at import time).
    if "app" in sys.modules:
        del sys.modules["app"]
def test_app(stack_defaults):
    # Smoke test: importing ``app`` executes the whole module (argument
    # parsing plus CDK stack construction) against the fixture environment.
    import app  # noqa: F401
def test_missing_argument(stack_defaults):
    # The import itself succeeds (the fixture supplies every parameter); the
    # expected ValueError comes from the explicit get_arg_value call after
    # SOURCE_BUCKET is blanked out.
    with pytest.raises(ValueError) as _e:
        import app  # noqa: F401

        os.environ["SEEDFARMER_PARAMETER_SOURCE_BUCKET"] = ""
        app.get_arg_value("SOURCE_BUCKET")
def test_project_deployment_name_length(stack_defaults):
    """Importing ``app`` must fail when project+deployment names are too long."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-project-incredibly"
    with pytest.raises(Exception) as e:
        import app  # noqa: F401
    # Fix: assert against the exception message itself (``e.value``) rather
    # than the repr of the pytest ExceptionInfo wrapper, which only matched
    # incidentally.
    assert "module cannot support a project+deployment name character length greater than" in str(e.value)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.