---
# Dragonfly Cache Configuration for DTO Status
# Live status cache for Data Transfer Operations
# Cluster topology
cluster:
  enabled: true
  # host:port pairs must stay quoted — unquoted digit:digit hits the
  # YAML 1.1 sexagesimal trap
  nodes:
    - "localhost:18000"
    - "localhost:18001"
    - "localhost:18002"
  # each key is stored on 2 nodes
  replication_factor: 2
# DTO key-name templates; {placeholders} are filled in by the application
key_patterns:
  # Per-run status, progress and artifact tracking
  run_status: "dto:run:{run_id}:status"
  run_progress: "dto:run:{run_id}:progress"
  run_artifacts: "dto:run:{run_id}:artifacts"
  # Job lifecycle queues
  job_queue: "dto:jobs:pending"
  job_in_progress: "dto:jobs:in_progress"
  job_completed: "dto:jobs:completed"
  # Alerting / notification channels
  alerts: "dto:alerts:{job_id}"
  notifications: "dto:notifications:{user_id}"
  # Cached per-run performance metrics
  metrics_throughput: "dto:metrics:throughput:{run_id}"
  metrics_checksum_time: "dto:metrics:checksum_time:{run_id}"
  metrics_validation_rate: "dto:metrics:validation_rate:{run_id}"
# Expiry policy, all values in seconds
ttl_config:
  run_status_ttl: 3600        # 1 hour — live status is short-lived
  run_progress_ttl: 3600      # 1 hour
  job_queue_ttl: 86400        # 24 hours — pending jobs
  job_in_progress_ttl: 86400  # 24 hours
  job_completed_ttl: 604800   # 7 days — completed-job history
  alerts_ttl: 172800          # 48 hours — alert retention
  metrics_ttl: 3600           # 1 hour — metrics cache
# Memory / throughput tuning
performance:
  max_memory_per_node: "50GB"
  max_client_connections: 10000
  # number of commands batched per pipeline
  pipeline_size: 100
  # Redis-style small-collection encoding thresholds
  hash_max_ziplist_entries: 512
  hash_max_ziplist_value: 64
  set_max_intset_entries: 512
# On-disk persistence
persistence:
  enabled: true
  dir: "/data/adaptai/platform/dataops/dto/cache/dragonfly_data"
  snapshot_interval: 300    # seconds between snapshots (5 minutes)
  snapshot_threshold: 1000  # minimum changes before a snapshot is taken
# Observability / metrics export
monitoring:
  prometheus_enabled: true
  prometheus_port: 18080
  stats_interval: 60  # seconds
  # DTO-specific metric toggles
  track_hit_rate: true
  track_memory_usage: true
  track_command_stats: true
# Security and authentication
security:
  # Global password — resolved from the environment; never commit the literal value
  requirepass: "${DRAGONFLY_PASSWORD}"
  # Role-scoped ACLs for DTO access
  acl_rules:
    # Writers: may create/update run, job and metric keys
    - user: "dto-producer"
      passwords: ["${DRAGONFLY_PRODUCER_PASSWORD}"]
      commands: ["SET", "HSET", "EXPIRE", "PUBLISH"]
      keys: ["dto:run:*", "dto:jobs:*", "dto:metrics:*"]
    # Readers: read-only access, plus alert keys
    - user: "dto-consumer"
      passwords: ["${DRAGONFLY_CONSUMER_PASSWORD}"]
      commands: ["GET", "HGETALL", "LRANGE", "SUBSCRIBE"]
      keys: ["dto:run:*", "dto:jobs:*", "dto:metrics:*", "dto:alerts:*"]
    # Admin: unrestricted commands and keys
    # (fixed: stray "]" after the commands list made this invalid YAML;
    #  added an explicit keys pattern to match the schema of the other rules)
    - user: "dto-admin"
      passwords: ["${DRAGONFLY_ADMIN_PASSWORD}"]
      commands: ["*"]
      keys: ["*"]
# Backup / disaster recovery
backup:
  enabled: true
  dir: "/data/adaptai/platform/dataops/dto/cache/backups"
  interval: 3600    # seconds between backups (1 hour)
  retain_count: 24  # keep one day's worth of hourly backups
# Server-side Lua scripts for atomic DTO operations
lua_scripts:
  # Write a run's status + timestamp in one HSET and refresh the 1h TTL.
  # ARGV: [1]=run_id, [2]=status, [3]=timestamp
  update_run_status: |
    local run_id = ARGV[1]
    local status = ARGV[2]
    local timestamp = ARGV[3]
    redis.call('HSET', 'dto:run:' .. run_id .. ':status',
               'status', status,
               'updated_at', timestamp)
    redis.call('EXPIRE', 'dto:run:' .. run_id .. ':status', 3600)
    return redis.status_reply('OK')
  # Add an artifact path to the run's per-type set and refresh the 24h TTL.
  # ARGV: [1]=run_id, [2]=artifact_type, [3]=artifact_path
  # (fixed: a stray trailing " |" inside the block scalar was part of the
  #  script body and made the Lua invalid)
  add_artifact: |
    local run_id = ARGV[1]
    local artifact_type = ARGV[2]
    local artifact_path = ARGV[3]
    redis.call('SADD', 'dto:run:' .. run_id .. ':artifacts:' .. artifact_type, artifact_path)
    redis.call('EXPIRE', 'dto:run:' .. run_id .. ':artifacts:' .. artifact_type, 86400)
    return redis.status_reply('OK')