File size: 7,517 Bytes
fd357f4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
-- Cassandra Schema for DTO Durable Storage
-- Data Transfer Operations run history and metrics

-- Keyspace for DTO operations.
-- NOTE(review): SimpleStrategy is suitable for single-DC dev/test clusters only;
-- multi-DC production deployments should use NetworkTopologyStrategy. Also, with
-- replication_factor = 2, QUORUM requires both replicas (2 of 2), so a single
-- node outage blocks quorum reads/writes -- confirm RF=3 is not required for
-- the availability target.
CREATE KEYSPACE IF NOT EXISTS dto_operations 
WITH replication = {
    'class': 'SimpleStrategy',
    'replication_factor': 2
}
AND durable_writes = true;

-- Table for run metadata and status (one partition per run_id).
-- NOTE(review): start_time is a clustering column, so any UPDATE to a run
-- (status, end_time, final_status, ...) must supply the exact start_time as
-- well as run_id -- confirm writers carry both. The 30-day TTL silently
-- expires run history; verify that satisfies any audit-retention requirement.
CREATE TABLE IF NOT EXISTS dto_operations.runs (
    run_id text,              -- run identifier (partition key)
    job_id text,              -- originating job (see dto_operations.jobs)
    manifest_path text,       -- manifest that defined the transfer
    data_class text,          -- CLASS_A, CLASS_B, CLASS_C
    environment text,         -- staging, prod, archive
    status text,              -- planned, in_progress, completed, failed, rolled_back
    start_time timestamp,     -- clustering key; newest first (see ordering below)
    end_time timestamp,
    initiated_by text,        -- operator or service that started the run
    approvers set<text>,      -- identities that approved the run
    data_size_bytes bigint,
    estimated_duration text,  -- human-readable estimate, e.g. '2h'
    final_status text,
    total_duration_seconds int,
    average_throughput_mbps double,
    artifacts list<text>,     -- artifact paths; full records in dto_operations.artifacts
    metadata map<text, text>, -- free-form key/value annotations
    PRIMARY KEY ((run_id), start_time)
) WITH CLUSTERING ORDER BY (start_time DESC)
   AND default_time_to_live = 2592000; -- 30 days TTL

-- Table for performance metrics (time-series data).
-- Partitioned by (run_id, metric_name) so one metric series for a run stays
-- in a single partition; samples expire after 7 days.
CREATE TABLE IF NOT EXISTS dto_operations.metrics (
    run_id text,
    metric_name text,         -- throughput_mbps, checksum_time_seconds, validation_passed
    bucket timestamp,         -- minute precision bucket (presumably truncated client-side;
                              -- Cassandra does not enforce this -- verify writers)
    timestamp timestamp,      -- exact sample time
    value double,
    labels map<text, text>,   -- source_host, target_host, transfer_method
    PRIMARY KEY ((run_id, metric_name), bucket, timestamp)
) WITH CLUSTERING ORDER BY (bucket DESC, timestamp DESC)
   AND default_time_to_live = 604800; -- 7 days TTL

-- Table for event history (durable event store).
-- event_id is included as the final clustering column: in the original
-- schema (PRIMARY KEY ((run_id), timestamp, event_type)) two events sharing
-- run_id + timestamp + event_type -- e.g. emitted within the same
-- millisecond -- would silently overwrite each other via Cassandra's
-- last-write-wins upsert. An event store must never lose writes that way.
-- Existing read patterns (by run_id, ordered by timestamp) are unchanged.
CREATE TABLE IF NOT EXISTS dto_operations.events (
    event_id text,            -- unique event identifier (now part of the key)
    event_type text,          -- RUN_PLANNED, PREFLIGHT_PASSED, etc.
    timestamp timestamp,      -- event emission time
    run_id text,              -- run this event belongs to (partition key)
    job_id text,              -- originating job
    payload text,             -- JSON payload
    PRIMARY KEY ((run_id), timestamp, event_type, event_id)
) WITH CLUSTERING ORDER BY (timestamp DESC, event_type ASC, event_id ASC)
   AND default_time_to_live = 2592000; -- 30 days TTL

-- Table for SLI (Service Level Indicator) tracking.
-- Partitioned by sli_name alone: every run's samples for a given SLI land in
-- the same partition. NOTE(review): the 30-day TTL bounds growth, but a high
-- run rate could still produce wide partitions -- confirm expected volume.
CREATE TABLE IF NOT EXISTS dto_operations.slis (
    sli_name text,            -- throughput_mbps, validation_success_rate
    run_id text,
    timestamp timestamp,
    expected_value double,    -- target value for the SLI
    actual_value double,      -- observed value
    breach_duration_seconds int,
    PRIMARY KEY ((sli_name), run_id, timestamp)
) WITH CLUSTERING ORDER BY (run_id DESC, timestamp DESC)
   AND default_time_to_live = 2592000; -- 30 days TTL

-- Table for artifact references.
-- Partitioned by (run_id, artifact_type) so all artifacts of one type for a
-- run are read together, newest first.
CREATE TABLE IF NOT EXISTS dto_operations.artifacts (
    run_id text,
    artifact_type text,       -- logs, reports, snapshots, checksums
    artifact_path text,       -- storage location of the artifact
    created_at timestamp,
    size_bytes bigint,
    checksum text,            -- integrity checksum of the artifact
    PRIMARY KEY ((run_id, artifact_type), created_at)
) WITH CLUSTERING ORDER BY (created_at DESC)
   AND default_time_to_live = 2592000; -- 30 days TTL

-- Table for job queue state (durable job tracking).
-- NOTE(review): created_at is a clustering column, so status/retry updates
-- need both job_id and the original created_at. There is also no efficient
-- "jobs due for retry" query against next_retry_time from this table alone
-- -- confirm how the scheduler polls.
CREATE TABLE IF NOT EXISTS dto_operations.jobs (
    job_id text,
    status text,              -- pending, in_progress, completed, failed
    run_id text,              -- run created for this job, if any
    created_at timestamp,
    updated_at timestamp,
    priority int,             -- 1-10 priority
    retry_count int,
    error_message text,       -- last failure reason
    next_retry_time timestamp,
    PRIMARY KEY ((job_id), created_at)
) WITH CLUSTERING ORDER BY (created_at DESC)
   AND default_time_to_live = 604800; -- 7 days TTL

-- Materialized views for common queries.
-- NOTE(review): materialized views are disabled by default in recent Cassandra
-- releases (materialized_views_enabled) and carry known base/view consistency
-- caveats -- confirm they are enabled and acceptable for this cluster.

-- View for finding runs by status (one partition per status value).
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_status AS
    SELECT run_id, job_id, status, start_time, environment, data_class
    FROM dto_operations.runs
    WHERE status IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
    PRIMARY KEY ((status), run_id, start_time)
    WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);

-- View for finding runs by environment (one partition per environment value:
-- staging, prod, archive).
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_environment AS
    SELECT run_id, job_id, status, start_time, environment, data_class
    FROM dto_operations.runs
    WHERE environment IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
    PRIMARY KEY ((environment), run_id, start_time)
    WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);

-- View for finding runs by data class (one partition per class:
-- CLASS_A, CLASS_B, CLASS_C).
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.runs_by_data_class AS
    SELECT run_id, job_id, status, start_time, environment, data_class
    FROM dto_operations.runs
    WHERE data_class IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
    PRIMARY KEY ((data_class), run_id, start_time)
    WITH CLUSTERING ORDER BY (run_id DESC, start_time DESC);

-- View for recent runs per environment, newest first.
-- Fixed from the original PRIMARY KEY ((environment, data_class), ...):
-- Cassandra permits at most ONE column that is not part of the base table's
-- primary key in a materialized view's primary key, and every MV primary-key
-- column must be restricted with IS NOT NULL in the WHERE clause -- the
-- original used two extra columns (environment AND data_class) and restricted
-- neither, so the statement would be rejected. environment is kept as the
-- partition key; data_class remains selectable as a regular column.
CREATE MATERIALIZED VIEW IF NOT EXISTS dto_operations.recent_runs AS
    SELECT run_id, job_id, status, start_time, environment, data_class
    FROM dto_operations.runs
    WHERE environment IS NOT NULL AND run_id IS NOT NULL AND start_time IS NOT NULL
    PRIMARY KEY ((environment), start_time, run_id)
    WITH CLUSTERING ORDER BY (start_time DESC);

-- Secondary indexes for lookup paths not covered by the primary keys.
-- Each index is explicitly named (<table>_<col>_idx) so errors and migrations
-- are greppable; unnamed indexes get auto-generated names that vary.
-- NOTE(review): every secondary-index query fans out across the cluster --
-- keep these to low-volume, low-cardinality lookups.
CREATE INDEX IF NOT EXISTS runs_job_id_idx ON dto_operations.runs (job_id);
CREATE INDEX IF NOT EXISTS runs_initiated_by_idx ON dto_operations.runs (initiated_by);
CREATE INDEX IF NOT EXISTS events_event_type_idx ON dto_operations.events (event_type);
CREATE INDEX IF NOT EXISTS events_job_id_idx ON dto_operations.events (job_id);
CREATE INDEX IF NOT EXISTS metrics_metric_name_idx ON dto_operations.metrics (metric_name);
CREATE INDEX IF NOT EXISTS artifacts_artifact_type_idx ON dto_operations.artifacts (artifact_type);

-- User-defined types for complex data.
-- NOTE(review): neither UDT is referenced by any table in this file --
-- presumably used via later ALTERs or client-side serialization; verify.

-- One throughput measurement over a sampling window.
CREATE TYPE IF NOT EXISTS dto_operations.throughput_sample (
    timestamp timestamp,      -- sample time
    value double,             -- measured throughput
    window_seconds int        -- width of the sampling window
);

-- Outcome of a single checksum verification.
CREATE TYPE IF NOT EXISTS dto_operations.checksum_result (
    algorithm text,           -- checksum algorithm used
    expected text,            -- expected digest
    actual text,              -- computed digest
    matched boolean,          -- result of the comparison
    verified_at timestamp     -- when verification ran
);

-- Functions for common operations.
-- NOTE(review): Java UDFs require user-defined functions to be enabled in
-- cassandra.yaml (enable_user_defined_functions / user_defined_functions_enabled)
-- -- confirm this on the target cluster.

-- Mean of a list of throughput samples; returns 0.0 for an empty list.
-- The original body also checked 'throughputs == null', but RETURNS NULL ON
-- NULL INPUT guarantees the body is never invoked with a null argument, so
-- that check was dead code and has been dropped.
CREATE FUNCTION IF NOT EXISTS dto_operations.avg_throughput(throughputs list<double>)
    RETURNS NULL ON NULL INPUT
    RETURNS double
    LANGUAGE java
    AS 'if (throughputs.isEmpty()) return 0.0;
        double sum = 0.0;
        for (double t : throughputs) sum += t;
        return sum / throughputs.size();';

-- Percentage of a transfer completed; returns 0.0 when total is 0 to avoid
-- division by zero. bigint arguments arrive in the Java body as Long; the
-- (double) cast forces floating-point division.
CREATE FUNCTION IF NOT EXISTS dto_operations.percent_complete(transferred bigint, total bigint)
    RETURNS NULL ON NULL INPUT
    RETURNS double
    LANGUAGE java
    AS 'if (total == 0) return 0.0; 
        return (double) transferred / total * 100.0;';

-- Insert sample data for testing.
-- NOTE(review): seeding fixtures from the schema file means every
-- (re)application of this script re-inserts them; the row expires via the
-- runs table's 30-day TTL. Consider moving test data to a separate seed
-- script so production schema application stays side-effect free.
INSERT INTO dto_operations.runs (
    run_id, job_id, manifest_path, data_class, environment, status,
    start_time, initiated_by, data_size_bytes, estimated_duration
) VALUES (
    'test-run-001', 'test-job-001', '/manifests/class_a/test.yaml', 'CLASS_A', 'staging', 'completed',
    toTimestamp(now()), 'prometheus', 107374182400, '2h'
);

-- Sample metrics row.
-- The original bucket expression 'dateOf(now()) - 1 * 60 * 1000' is not
-- valid CQL: dateOf() is deprecated (use toTimestamp()) and bare integer
-- arithmetic on timestamps is not supported, so the statement failed to
-- parse. Production writers should truncate the sample time to the minute
-- client-side; for sample data the current timestamp is sufficient.
INSERT INTO dto_operations.metrics (
    run_id, metric_name, bucket, timestamp, value, labels
) VALUES (
    'test-run-001', 'throughput_mbps',
    toTimestamp(now()),  -- bucket (minute-truncated by real writers)
    toTimestamp(now()), 604.0,
    {'source_host': 'vast2', 'target_host': 'vast1', 'transfer_method': 'ssh+dd'}
);

-- Grant permissions to DTO service account.
-- NOTE(review): '${CASSANDRA_DTO_PASSWORD}' is NOT expanded by cqlsh -- this
-- file must pass through a templating step (envsubst or similar) before
-- application, or the role is created with the literal placeholder as its
-- password. GRANT ALL PERMISSIONS includes schema-altering rights; confirm
-- the service does not merely need MODIFY/SELECT on the keyspace.
CREATE ROLE IF NOT EXISTS dto_service WITH LOGIN = true AND PASSWORD = '${CASSANDRA_DTO_PASSWORD}';
GRANT ALL PERMISSIONS ON KEYSPACE dto_operations TO dto_service;
GRANT EXECUTE ON ALL FUNCTIONS IN KEYSPACE dto_operations TO dto_service;