-- adaptai/platform/dataops/dto/database/cassandra_schema.cql
-- Cassandra Schema for DTO Durable Storage
-- Data Transfer Operations run history and metrics
-- Keyspace for DTO operations
-- Keyspace holding all DTO durable-storage tables, views, types, and functions.
-- NOTE(review): SimpleStrategy is intended for single-datacenter / dev clusters;
-- production multi-DC deployments should use NetworkTopologyStrategy — confirm
-- the target topology before rollout.
-- NOTE(review): replication_factor = 2 cannot satisfy QUORUM with one replica
-- down; RF=3 is the usual minimum for quorum-based availability — verify this
-- is intentional for this cluster.
CREATE KEYSPACE IF NOT EXISTS dto_operations
WITH replication = {
'class': 'SimpleStrategy',
'replication_factor': 2
}
AND durable_writes = true;
-- Table for run metadata and status
-- One row per DTO run; the partition key is run_id alone, so each partition
-- holds that run's row(s).
-- NOTE(review): clustering on start_time only matters if the same run_id is
-- ever written more than once — confirm whether run_id is globally unique.
-- Any column change here must be mirrored in the materialized views below.
CREATE TABLE IF NOT EXISTS dto_operations.runs (
run_id text,
job_id text,
manifest_path text,
data_class text, -- CLASS_A, CLASS_B, CLASS_C
environment text, -- staging, prod, archive
status text, -- planned, in_progress, completed, failed, rolled_back
start_time timestamp,
end_time timestamp,
initiated_by text, -- account that started the run (see sample insert below)
approvers set<text>, -- accounts that approved the run
data_size_bytes bigint,
estimated_duration text, -- human-readable estimate, e.g. '2h'
final_status text,
total_duration_seconds int,
average_throughput_mbps double,
artifacts list<text>, -- presumably paths into the artifacts table — confirm
metadata map<text, text>, -- free-form key/value annotations
PRIMARY KEY ((run_id), start_time)
) WITH CLUSTERING ORDER BY (start_time DESC)
AND default_time_to_live = 2592000; -- rows expire after 30 days
-- Table for performance metrics (time-series data)
-- Time-series metric samples, partitioned by (run_id, metric_name).
-- NOTE(review): `bucket` is documented as a minute-precision bucket, but
-- nothing here enforces truncation — the writer application must do it.
-- `timestamp` is a non-reserved CQL keyword, so using it as a column name is
-- legal, though it shadows the type name.
CREATE TABLE IF NOT EXISTS dto_operations.metrics (
run_id text,
metric_name text, -- throughput_mbps, checksum_time_seconds, validation_passed
bucket timestamp, -- minute precision bucket (truncated by the writer)
timestamp timestamp, -- exact sample time
value double,
labels map<text, text>, -- source_host, target_host, transfer_method
PRIMARY KEY ((run_id, metric_name), bucket, timestamp)
) WITH CLUSTERING ORDER BY (bucket DESC, timestamp DESC)
AND default_time_to_live = 604800; -- samples expire after 7 days
-- Table for event history (durable event store)
-- Durable event log, one partition per run, newest events first.
-- FIX: event_id was stored but not part of the primary key, so two events of
-- the same type landing in the same millisecond for the same run would
-- silently overwrite each other. Adding event_id as the final clustering
-- column preserves every event; existing queries by (run_id, timestamp,
-- event_type) prefixes continue to work unchanged.
CREATE TABLE IF NOT EXISTS dto_operations.events (
event_id text,
event_type text, -- RUN_PLANNED, PREFLIGHT_PASSED, etc.
timestamp timestamp,
run_id text,
job_id text,
payload text, -- JSON payload
PRIMARY KEY ((run_id), timestamp, event_type, event_id)
) WITH CLUSTERING ORDER BY (timestamp DESC, event_type ASC, event_id ASC)
AND default_time_to_live = 2592000; -- 30 days TTL
-- Table for SLI (Service Level Indicator) tracking
-- Service Level Indicator observations, one partition per indicator name.
-- NOTE(review): with few distinct sli_name values, each partition accumulates
-- every run's rows; consider time-bucketing the partition key if volume grows.
CREATE TABLE IF NOT EXISTS dto_operations.slis (
sli_name text, -- throughput_mbps, validation_success_rate
run_id text,
timestamp timestamp,
expected_value double, -- target/threshold value for the indicator
actual_value double, -- observed value
breach_duration_seconds int, -- how long the indicator was out of bounds
PRIMARY KEY ((sli_name), run_id, timestamp)
) WITH CLUSTERING ORDER BY (run_id DESC, timestamp DESC)
AND default_time_to_live = 2592000; -- 30 days TTL
-- Table for artifact references
-- References to run artifacts, partitioned by (run_id, artifact_type).
-- FIX: artifact_path was not part of the primary key, so two artifacts of the
-- same type created in the same millisecond (common for batch-generated
-- checksums/logs) would overwrite each other. Adding artifact_path as a
-- clustering column makes each artifact row distinct; existing queries by
-- (run_id, artifact_type) and by created_at prefix still work unchanged.
CREATE TABLE IF NOT EXISTS dto_operations.artifacts (
run_id text,
artifact_type text, -- logs, reports, snapshots, checksums
artifact_path text,
created_at timestamp,
size_bytes bigint,
checksum text,
PRIMARY KEY ((run_id, artifact_type), created_at, artifact_path)
) WITH CLUSTERING ORDER BY (created_at DESC, artifact_path ASC)
AND default_time_to_live = 2592000; -- 30 days TTL
-- Table for job queue state (durable job tracking)
-- Durable job-queue state, one partition per job.
-- NOTE(review): created_at is a clustering column, so status updates must
-- supply the original created_at or they create a second row instead of
-- updating the first — verify the writer does this.
-- NOTE(review): the 7-day TTL also expires still-pending jobs — confirm that
-- is acceptable for the queue semantics.
CREATE TABLE IF NOT EXISTS dto_operations.jobs (
job_id text,
status text, -- pending, in_progress, completed, failed
run_id text,
created_at timestamp,
updated_at timestamp,
priority int, -- 1-10 priority
retry_count int,
error_message text,
next_retry_time timestamp,
PRIMARY KEY ((job_id), created_at)
) WITH CLUSTERING ORDER BY (created_at DESC)
AND default_time_to_live = 604800; -- 7 days TTL
-- Materialized views for common queries
-- View for finding runs by status
-- Lookup of runs by status (all base primary-key columns carry IS NOT NULL,
-- as Cassandra requires for materialized views).
-- NOTE(review): status is low-cardinality, so each status value becomes one
-- potentially very large partition — acceptable only at modest run volume.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_status AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE status IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((status), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
-- View for finding runs by environment
-- Lookup of runs by environment (staging, prod, archive).
-- NOTE(review): environment is low-cardinality, so each value becomes one
-- large partition — same scale caveat as runs_by_status.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_environment AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE environment IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((environment), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
-- View for finding runs by data class
-- Lookup of runs by data class (CLASS_A, CLASS_B, CLASS_C).
-- NOTE(review): data_class is low-cardinality, so each value becomes one
-- large partition — same scale caveat as runs_by_status.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_data_class AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE data_class IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((data_class), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
-- View for recent runs across all environments
-- Recent runs per (environment, data_class), newest first.
-- FIX: Cassandra requires an IS NOT NULL restriction on EVERY primary-key
-- column of a materialized view. environment and data_class are part of this
-- view's partition key but had no IS NOT NULL filters, so the original
-- CREATE statement would be rejected by the server.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.recent_runs AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE environment IS NOT NULL AND data_class IS NOT NULL
AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((environment, data_class), start_time, run_id)
WITH CLUSTERING ORDER BY (start_time DESC);
-- Indexes for better query performance
-- Secondary indexes for ad-hoc lookups not served by the materialized views.
-- NOTE(review): secondary indexes on low-cardinality columns (event_type,
-- metric_name, artifact_type) are a known Cassandra scaling anti-pattern —
-- acceptable only at modest data volume.
-- NOTE(review): metric_name and artifact_type are components of composite
-- partition keys, which is the only case where indexing a partition-key
-- column is permitted.
-- NOTE(review): these indexes are unnamed; Cassandra auto-generates names.
-- Consider explicit names for greppable errors and migrations.
CREATE INDEX IF NOT EXISTS ON dto_operations.runs (job_id);
CREATE INDEX IF NOT EXISTS ON dto_operations.runs (initiated_by);
CREATE INDEX IF NOT EXISTS ON dto_operations.events (event_type);
CREATE INDEX IF NOT EXISTS ON dto_operations.events (job_id);
CREATE INDEX IF NOT EXISTS ON dto_operations.metrics (metric_name);
CREATE INDEX IF NOT EXISTS ON dto_operations.artifacts (artifact_type);
-- User-defined types for complex data
-- A single throughput measurement aggregated over a time window.
-- NOTE(review): this type is not referenced by any table in this file —
-- confirm it is used by application code or other schema not shown here.
CREATE TYPE IF NOT EXISTS dto_operations.throughput_sample (
timestamp timestamp, -- when the sample was taken
value double, -- presumably Mbps, matching the metrics table — confirm
window_seconds int -- size of the averaging window
);
-- Outcome of a checksum verification for transferred data.
-- NOTE(review): not referenced by any table in this file — confirm usage.
CREATE TYPE IF NOT EXISTS dto_operations.checksum_result (
algorithm text, -- hash algorithm identifier; values not constrained here
expected text, -- expected digest
actual text, -- computed digest
matched boolean, -- presumably expected == actual, set by the writer — confirm
verified_at timestamp
);
-- Functions for common operations
-- Mean of a list of throughput samples; returns 0.0 for an empty list.
-- NOTE(review): RETURNS NULL ON NULL INPUT means this function is never
-- invoked with a null argument, so the `throughputs == null` guard in the
-- body is unreachable (harmless dead code).
-- NOTE(review): UDFs require user-defined functions to be enabled in
-- cassandra.yaml — confirm the cluster configuration allows them.
CREATE FUNCTION IF NOT EXISTS dto_operations.avg_throughput(throughputs list<double>)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java
AS 'if (throughputs == null || throughputs.isEmpty()) return 0.0;
double sum = 0.0;
for (double t : throughputs) sum += t;
return sum / throughputs.size();';
-- Percentage of bytes transferred out of total; returns 0.0 when total is 0
-- to avoid division by zero. Null arguments short-circuit to null via
-- RETURNS NULL ON NULL INPUT, so the body never sees nulls.
-- NOTE(review): negative inputs are not guarded — confirm callers only pass
-- non-negative byte counts.
CREATE FUNCTION IF NOT EXISTS dto_operations.percent_complete(transferred bigint, total bigint)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java
AS 'if (total == 0) return 0.0;
return (double) transferred / total * 100.0;';
-- Insert sample data for testing
-- Sample run row for testing.
-- NOTE(review): test data embedded in the schema file is inserted on every
-- deployment — consider moving sample rows to a separate seed script.
INSERT INTO dto_operations.runs (
run_id, job_id, manifest_path, data_class, environment, status,
start_time, initiated_by, data_size_bytes, estimated_duration
) VALUES (
'test-run-001', 'test-job-001', '/manifests/class_a/test.yaml', 'CLASS_A', 'staging', 'completed',
toTimestamp(now()), 'prometheus', 107374182400, '2h' -- 100 GiB, 2-hour estimate
);
-- Sample metric data point for testing.
-- FIX: the original bucket expression `dateOf(now()) - 1 * 60 * 1000` is not
-- valid CQL — dateOf() is deprecated in favor of toTimestamp(), and integer
-- arithmetic on timestamp values is not supported, so the statement fails.
-- The bucket now uses the current timestamp; minute-precision truncation is
-- the writer application's responsibility (see the metrics table comment).
INSERT INTO dto_operations.metrics (
run_id, metric_name, bucket, timestamp, value, labels
) VALUES (
'test-run-001', 'throughput_mbps',
toTimestamp(now()), -- bucket (app truncates to the minute in real writes)
toTimestamp(now()), 604.0,
{'source_host': 'vast2', 'target_host': 'vast1', 'transfer_method': 'ssh+dd'}
);
-- Grant permissions to DTO service account
-- Service account for the DTO service.
-- NOTE(review): '${CASSANDRA_DTO_PASSWORD}' is NOT expanded by cqlsh — this
-- file must pass through a templating step (e.g. envsubst) before execution,
-- otherwise the literal placeholder string becomes the password. Verify the
-- deployment pipeline does this.
-- NOTE(review): GRANT ALL on the keyspace includes DDL rights (CREATE/DROP/
-- ALTER); consider narrowing to SELECT/MODIFY if the service only reads and
-- writes data.
CREATE ROLE IF NOT EXISTS dto_service WITH LOGIN = true AND PASSWORD = '${CASSANDRA_DTO_PASSWORD}';
GRANT ALL PERMISSIONS ON KEYSPACE dto_operations TO dto_service;
GRANT EXECUTE ON ALL FUNCTIONS IN KEYSPACE dto_operations TO dto_service;