# Source: adaptai/platform/dataops/dto/dto_manifest.yaml
# Uploaded by ADAPT-Chase via upload-large-folder tool (commit fd357f4, verified)
---
# DTO Manifest v1.2 - AdaptAI Data Transfer Operations
# Compliant with OPERATING_AGREEMENT.md standards
apiVersion: dto/v1
kind: ServiceManifest

metadata:
  name: adaptai-dto-platform
  namespace: dataops
  # Quoted: bare 1.0.0 is a string anyway, but quoting guards against
  # tooling that retypes version-like scalars.
  version: "1.0.0"
  owner: dto-team
  maintainer: chase@adapt.ai
  # Quoted: an unquoted ISO date would parse as a datetime.date in YAML 1.1.
  created: "2025-08-27"
  description: "Comprehensive Data Transfer Operations platform for AdaptAI monorepo"
# Service catalog and dependencies.
# Each entry: name (unique id), type, runtime (native binary or python),
# slo_tier (keys into slo_specifications below), ports (empty list when the
# service exposes none), dependencies (names of other services in this catalog).
services:
  infrastructure:
    - name: dto-nats-server
      type: message-broker
      runtime: native
      slo_tier: critical
      # 4222 client, 8222 monitoring, 6222 cluster routing — registered
      # under port_registry.message_brokers.
      ports: [4222, 8222, 6222]
      dependencies: []
    - name: dto-dragonfly-cluster
      type: cache-cluster
      runtime: native
      slo_tier: critical
      ports: [18000, 18001, 18002, 11211, 11212, 11213]
      dependencies: []
    - name: dto-janusgraph
      type: graph-database
      runtime: native
      slo_tier: high
      ports: [8182]
      dependencies: []
  applications:
    - name: dto-lineage-handler
      type: event-processor
      runtime: python
      slo_tier: high
      ports: []
      dependencies: [dto-nats-server, dto-janusgraph]
    - name: dto-slack-automation
      type: integration-service
      runtime: python
      slo_tier: standard
      ports: [3001]
      dependencies: [dto-nats-server]
    - name: dto-jira-automation
      type: integration-service
      runtime: python
      slo_tier: standard
      ports: []
      dependencies: [dto-nats-server]
    - name: dto-confluence-automation
      type: integration-service
      runtime: python
      slo_tier: standard
      ports: []
      dependencies: [dto-nats-server]
    - name: dto-jira-webhooks
      type: webhook-handler
      runtime: python
      slo_tier: high
      ports: [8080]
      dependencies: [dto-nats-server, dto-jira-automation]
  monitoring:
    - name: dto-health-monitor
      type: monitoring-service
      runtime: python
      slo_tier: high
      ports: [8090]
      dependencies: [dto-nats-server, dto-dragonfly-cluster]
# Environment configurations.
# overlay_path points at the kustomize-style overlay directory for the env;
# resource_limits are per-environment machine budgets.
environments:
  development:
    overlay_path: overlays/dev
    resource_limits:
      cpu_cores: 2
      memory_gb: 4
      disk_gb: 50
  staging:
    overlay_path: overlays/staging
    resource_limits:
      cpu_cores: 4
      memory_gb: 8
      disk_gb: 100
  production:
    overlay_path: overlays/prod
    resource_limits:
      cpu_cores: 8
      memory_gb: 16
      disk_gb: 200
# SignalCore SLO specifications.
# Tier names are referenced by services[*].slo_tier above.
# availability is a percentage; latency/recovery/downtime values are
# human-readable duration strings consumed by the SLO tooling.
slo_specifications:
  critical:
    availability: 99.95
    latency_p99: 100ms
    recovery_time: 30s
    max_downtime: 4h/month
  high:
    availability: 99.9
    latency_p99: 500ms
    recovery_time: 2m
    max_downtime: 8h/month
  standard:
    availability: 99.5
    latency_p99: 2s
    recovery_time: 5m
    max_downtime: 36h/month
# Port allocations (CI validation).
# Single ports are integers; contiguous ranges use "LOW-HIGH" notation and
# are quoted explicitly so they always read as strings, matching the
# integer entries' style elsewhere in this registry.
port_registry:
  message_brokers: [4222, 8222, 6222]
  cache_cluster: ["18000-18002", "11211-11213"]
  databases: [8182]
  web_services: [3001, 8080, 8090]
  health_checks: ["8091-8099"]
# Generator configurations: templates rendered into service configs and docs.
generators:
  supervisord:
    template_path: templates/supervisord.conf.j2
    output_path: services/supervisord.conf
    validation: schema/supervisord.yaml
  runbooks:
    template_path: templates/runbook.md.j2
    output_path: docs/runbooks
    format: markdown
  systemd_compat:
    # Disabled by design — process supervision is handled by supervisord.
    enabled: false
    reason: "SignalCore runtime uses supervisord"
# Syncthing narrow usage policy.
# Only CLASS_B and CLASS_C data may sync; excluded_paths are glob patterns
# (quoted — an unquoted leading "*" would be parsed as a YAML alias).
syncthing:
  enabled: true
  data_classes: [CLASS_B, CLASS_C]
  excluded_paths:
    - "/.git/*"
    - "/logs/*"
    - "/cache/dragonfly_data/*"
    - "*.tmp"
    - "*.log"
  bandwidth_limits:
    # Quoted for consistency with the schedule strings below.
    class_b: "50MB/s"
    class_c: "20MB/s"
  schedule:
    # class_b syncs around the clock; class_c only overnight.
    class_b: "00:00-23:59"
    class_c: "22:00-06:00"
# CI validation gates — each flag enables one check in the pipeline.
validation:
  schema_validation: true
  port_conflict_check: true
  dependency_validation: true
  runbook_generation: true
  slo_compliance_check: true
  codeowners_validation: true
# Operational metadata.
operations:
  backup_strategy: event-sourced
  disaster_recovery: multi-region
  monitoring_stack: prometheus+grafana
  logging_format: structured-json
  observability_level: trace