|
|
|
|
|
|
|
|
|
|
|
|
|
|
-- Operational keyspace for Data Transfer Operations (DTO): run tracking,
-- metrics, events, SLIs, artifacts, and job state.
-- NOTE(review): SimpleStrategy is single-datacenter only and is generally
-- discouraged for production; NetworkTopologyStrategy is preferred once DC
-- names are known — confirm target topology before deploying.
-- NOTE(review): replication_factor = 2 cannot satisfy QUORUM (2 of 2) if a
-- single replica is down; RF=3 is the usual production choice — confirm.
CREATE KEYSPACE IF NOT EXISTS dto_operations
WITH replication = {
'class': 'SimpleStrategy',
'replication_factor': 2
}
AND durable_writes = true;
|
|
|
|
|
|
|
|
-- One row per transfer run, keyed by run_id.
-- Partition key is run_id alone, so each run lives in its own partition;
-- start_time is a clustering column chiefly so the materialized views below
-- (runs_by_status / runs_by_environment / runs_by_data_class / recent_runs)
-- can include it in their primary keys.
CREATE TABLE IF NOT EXISTS dto_operations.runs (
run_id text,
job_id text,
manifest_path text,                 -- path to the YAML manifest that defined the run
data_class text,                    -- e.g. 'CLASS_A' (see seed row below)
environment text,                   -- e.g. 'staging'
status text,                        -- in-flight status
start_time timestamp,
end_time timestamp,
initiated_by text,
approvers set<text>,
data_size_bytes bigint,
estimated_duration text,            -- human-readable estimate, e.g. '2h'
final_status text,                  -- terminal status, distinct from in-flight `status`
total_duration_seconds int,
average_throughput_mbps double,
artifacts list<text>,
metadata map<text, text>,
PRIMARY KEY ((run_id), start_time)
) WITH CLUSTERING ORDER BY (start_time DESC)
AND default_time_to_live = 2592000;  -- 30 days
|
|
|
|
|
|
|
|
-- Time-series metrics per run. Partitioned by (run_id, metric_name) with a
-- coarse `bucket` clustering column ahead of the fine-grained `timestamp` to
-- keep partitions scannable in time-bucket order.
-- NOTE(review): the column name `timestamp` shadows the CQL type keyword; it
-- is legal (unreserved keyword) but easy to misread — consider `recorded_at`
-- in a future schema revision.
CREATE TABLE IF NOT EXISTS dto_operations.metrics (
run_id text,
metric_name text,
bucket timestamp,                   -- coarse time bucket for range scans
timestamp timestamp,                -- exact sample time
value double,
labels map<text, text>,             -- free-form dimension labels (host, method, ...)
PRIMARY KEY ((run_id, metric_name), bucket, timestamp)
) WITH CLUSTERING ORDER BY (bucket DESC, timestamp DESC)
AND default_time_to_live = 604800;  -- 7 days
|
|
|
|
|
|
|
|
-- Event log per run.
-- FIX: event_id was previously a plain column, not part of the primary key,
-- so two events with the same (run_id, timestamp, event_type) silently
-- overwrote each other (Cassandra upserts on primary-key collision).
-- event_id is now the final clustering column, guaranteeing row uniqueness
-- while keeping every existing run_id / timestamp query pattern valid.
CREATE TABLE IF NOT EXISTS dto_operations.events (
event_id text,
event_type text,
timestamp timestamp,
run_id text,
job_id text,
payload text,                       -- serialized event body
PRIMARY KEY ((run_id), timestamp, event_type, event_id)
) WITH CLUSTERING ORDER BY (timestamp DESC, event_type ASC, event_id ASC)
AND default_time_to_live = 2592000;  -- 30 days
|
|
|
|
|
|
|
|
-- SLI observations, partitioned by SLI name so a single SLI's history across
-- runs can be read from one partition.
CREATE TABLE IF NOT EXISTS dto_operations.slis (
sli_name text,
run_id text,
timestamp timestamp,
expected_value double,              -- SLO target at observation time
actual_value double,                -- measured value
breach_duration_seconds int,        -- how long the SLI was in breach
PRIMARY KEY ((sli_name), run_id, timestamp)
) WITH CLUSTERING ORDER BY (run_id DESC, timestamp DESC)
AND default_time_to_live = 2592000;  -- 30 days
|
|
|
|
|
|
|
|
-- Artifacts produced by a run (logs, reports, checksum manifests, ...).
-- Composite partition key lets consumers fetch all artifacts of one type for
-- a run in a single partition read.
CREATE TABLE IF NOT EXISTS dto_operations.artifacts (
run_id text,
artifact_type text,
artifact_path text,
created_at timestamp,
size_bytes bigint,
checksum text,                      -- content checksum of the stored artifact
PRIMARY KEY ((run_id, artifact_type), created_at)
) WITH CLUSTERING ORDER BY (created_at DESC)
AND default_time_to_live = 2592000;  -- 30 days
|
|
|
|
|
|
|
|
-- Job queue / state table; one partition per job_id, clustered by creation
-- time so the newest record for a job sorts first.
CREATE TABLE IF NOT EXISTS dto_operations.jobs (
job_id text,
status text,
run_id text,                        -- run currently associated with this job
created_at timestamp,
updated_at timestamp,
priority int,
retry_count int,
error_message text,                 -- last failure message, if any
next_retry_time timestamp,          -- when the next retry is due
PRIMARY KEY ((job_id), created_at)
) WITH CLUSTERING ORDER BY (created_at DESC)
AND default_time_to_live = 604800;  -- 7 days
|
|
|
|
|
|
|
|
|
|
|
|
|
|
-- Query runs by status (e.g. all 'completed' runs). All view primary-key
-- columns carry the required IS NOT NULL restriction.
-- NOTE(review): materialized views are disabled by default in recent
-- Cassandra releases (materialized_views_enabled) — confirm cluster config.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_status AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE status IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((status), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
|
|
|
|
|
|
|
|
-- Query runs by environment (e.g. all 'staging' runs). Same shape as
-- runs_by_status, partitioned on environment instead.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_environment AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE environment IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((environment), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
|
|
|
|
|
|
|
|
-- Query runs by data classification (e.g. all 'CLASS_A' runs). Same shape as
-- runs_by_status, partitioned on data_class instead.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_data_class AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE data_class IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((data_class), run_id, start_time)
WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);
|
|
|
|
|
|
|
|
-- Recent runs per (environment, data_class), newest first.
-- FIX: Cassandra requires EVERY primary-key column of a materialized view to
-- be restricted with IS NOT NULL in the WHERE clause. `environment` and
-- `data_class` form this view's partition key but were missing, so the
-- original CREATE statement was rejected by the server.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.recent_runs AS
SELECT run_id, job_id, status, start_time, environment, data_class
FROM dto_operations.runs
WHERE environment IS NOT NULL AND data_class IS NOT NULL
  AND run_id IS NOT NULL AND start_time IS NOT NULL
PRIMARY KEY ((environment, data_class), start_time, run_id)
WITH CLUSTERING ORDER BY (start_time DESC);
|
|
|
|
|
|
|
|
-- Secondary indexes for ad-hoc lookups.
-- NOTE(review): Cassandra secondary indexes are node-local; queries that use
-- them without also restricting the partition key fan out to every node.
-- Acceptable for low-volume operational lookups — confirm query patterns.
CREATE INDEX IF NOT EXISTS ON dto_operations.runs (job_id);
CREATE INDEX IF NOT EXISTS ON dto_operations.runs (initiated_by);
CREATE INDEX IF NOT EXISTS ON dto_operations.events (event_type);
CREATE INDEX IF NOT EXISTS ON dto_operations.events (job_id);
-- NOTE(review): metric_name is already part of metrics' partition key; this
-- index is only useful for scanning a metric across runs — verify it is used.
CREATE INDEX IF NOT EXISTS ON dto_operations.metrics (metric_name);
CREATE INDEX IF NOT EXISTS ON dto_operations.artifacts (artifact_type);
|
|
|
|
|
|
|
|
-- UDT: a single throughput measurement over a sampling window.
CREATE TYPE IF NOT EXISTS dto_operations.throughput_sample (
timestamp timestamp,                -- when the sample was taken
value double,                       -- throughput value for the window
window_seconds int                  -- width of the sampling window
);
|
|
|
|
|
-- UDT: outcome of a checksum verification for a transferred object.
CREATE TYPE IF NOT EXISTS dto_operations.checksum_result (
algorithm text,                     -- e.g. hash algorithm name
expected text,                      -- checksum recorded at the source
actual text,                        -- checksum computed at the target
matched boolean,                    -- expected == actual
verified_at timestamp
);
|
|
|
|
|
|
|
|
-- UDF: arithmetic mean of a list of throughput samples; returns 0.0 for an
-- empty list (deliberate — avoids a division-by-zero NaN for callers).
-- NOTE(review): the `throughputs == null` branch is unreachable given
-- RETURNS NULL ON NULL INPUT, which short-circuits null arguments before the
-- body runs; harmless but redundant.
-- NOTE(review): Java UDFs require enable_user_defined_functions on the
-- cluster — confirm it is enabled before relying on this.
CREATE FUNCTION IF NOT EXISTS dto_operations.avg_throughput(throughputs list<double>)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java
AS 'if (throughputs == null || throughputs.isEmpty()) return 0.0;
double sum = 0.0;
for (double t : throughputs) sum += t;
return sum / throughputs.size();';
|
|
|
|
|
-- UDF: percentage of bytes transferred, guarding division by zero (returns
-- 0.0 when total == 0). Null arguments short-circuit to null via
-- RETURNS NULL ON NULL INPUT.
CREATE FUNCTION IF NOT EXISTS dto_operations.percent_complete(transferred bigint, total bigint)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java
AS 'if (total == 0) return 0.0;
return (double) transferred / total * 100.0;';
|
|
|
|
|
|
|
|
-- Seed/smoke-test row: one completed run so dashboards and the materialized
-- views have data immediately after schema creation.
INSERT INTO dto_operations.runs (
run_id, job_id, manifest_path, data_class, environment, status,
start_time, initiated_by, data_size_bytes, estimated_duration
) VALUES (
'test-run-001', 'test-job-001', '/manifests/class_a/test.yaml', 'CLASS_A', 'staging', 'completed',
toTimestamp(now()), 'prometheus', 107374182400, '2h'  -- 100 GiB
);
|
|
|
|
|
-- Seed/smoke-test metric sample for the seed run above.
-- FIX: the original bucket expression `dateOf(now()) - 1 * 60 * 1000` is not
-- valid CQL — dateOf() is deprecated in favor of toTimestamp(), and CQL does
-- not support subtracting a bare integer from a timestamp. Cassandra 4.0+
-- supports timestamp-minus-duration arithmetic, so the intended "one minute
-- ago" bucket is expressed as `toTimestamp(now()) - 1m`.
INSERT INTO dto_operations.metrics (
run_id, metric_name, bucket, timestamp, value, labels
) VALUES (
'test-run-001', 'throughput_mbps',
toTimestamp(now()) - 1m,            -- bucket: one minute before the sample
toTimestamp(now()), 604.0,
{'source_host': 'vast2', 'target_host': 'vast1', 'transfer_method': 'ssh+dd'}
);
|
|
|
|
|
|
|
|
-- Service account for the DTO application.
-- NOTE(review): '${CASSANDRA_DTO_PASSWORD}' must be substituted by the
-- deployment tooling BEFORE this file reaches cqlsh — cqlsh performs no
-- environment-variable expansion, so an unsubstituted run would set the
-- literal placeholder string as the password. Verify the pipeline.
-- NOTE(review): GRANT ALL includes DROP/ALTER on the keyspace; consider
-- narrowing to SELECT/MODIFY if the service never performs schema changes.
CREATE ROLE IF NOT EXISTS dto_service WITH LOGIN = true AND PASSWORD = '${CASSANDRA_DTO_PASSWORD}';
GRANT ALL PERMISSIONS ON KEYSPACE dto_operations TO dto_service;
GRANT EXECUTE ON ALL FUNCTIONS IN KEYSPACE dto_operations TO dto_service;