# Source: adaptai/platform/dataops/dto/cache/dragonfly_config.yaml
---
# Dragonfly Cache Configuration for DTO Status
# Live status cache for Data Transfer Operations
# Cluster Configuration
cluster:
  # Run Dragonfly in cluster mode across three local nodes.
  enabled: true
  nodes:
    - "localhost:18000"
    - "localhost:18001"
    - "localhost:18002"
  # Number of copies of each key kept across the cluster.
  replication_factor: 2
# DTO-specific key patterns and TTLs
# Placeholders in braces ({run_id}, {job_id}, {user_id}) are filled in
# by the application before issuing commands.
key_patterns:
  # Run status and progress
  run_status: "dto:run:{run_id}:status"
  run_progress: "dto:run:{run_id}:progress"
  run_artifacts: "dto:run:{run_id}:artifacts"
  # Job queue and scheduling
  job_queue: "dto:jobs:pending"
  job_in_progress: "dto:jobs:in_progress"
  job_completed: "dto:jobs:completed"
  # Alerting and notifications
  alerts: "dto:alerts:{job_id}"
  notifications: "dto:notifications:{user_id}"
  # Performance metrics cache
  metrics_throughput: "dto:metrics:throughput:{run_id}"
  metrics_checksum_time: "dto:metrics:checksum_time:{run_id}"
  metrics_validation_rate: "dto:metrics:validation_rate:{run_id}"
# Time-to-Live configurations (all values in seconds)
ttl_config:
  # Short-lived: status and progress (1 hour)
  run_status_ttl: 3600
  run_progress_ttl: 3600
  # Medium-lived: job queues (24 hours)
  job_queue_ttl: 86400
  job_in_progress_ttl: 86400
  # Long-lived: completed jobs (7 days)
  job_completed_ttl: 604800
  # Alert retention (48 hours)
  alerts_ttl: 172800
  # Metrics cache (1 hour)
  metrics_ttl: 3600
# Memory and performance settings
performance:
  # Per-node memory cap; quoted so the unit suffix stays a string.
  max_memory_per_node: "50GB"
  max_client_connections: 10000
  # Number of commands batched per pipeline round trip.
  pipeline_size: 100
  # DTO-specific optimizations: thresholds below which small hashes/sets
  # use compact in-memory encodings.
  hash_max_ziplist_entries: 512
  hash_max_ziplist_value: 64
  set_max_intset_entries: 512
# Persistence settings
persistence:
  enabled: true
  dir: "/data/adaptai/platform/dataops/dto/cache/dragonfly_data"
  snapshot_interval: 300  # 5 minutes
  # Minimum number of changed keys before a snapshot is taken.
  snapshot_threshold: 1000
# Monitoring and observability
monitoring:
  prometheus_enabled: true
  prometheus_port: 18080
  # How often stats are sampled, in seconds.
  stats_interval: 60
  # DTO-specific metrics
  track_hit_rate: true
  track_memory_usage: true
  track_command_stats: true
# Security and authentication
# Passwords are injected from the environment at deploy time; keep them
# as quoted strings so tooling never retypes them.
security:
  requirepass: "${DRAGONFLY_PASSWORD}"
  # DTO-specific ACLs: producers write, consumers read, admin has full access.
  acl_rules:
    - user: "dto-producer"
      passwords: ["${DRAGONFLY_PRODUCER_PASSWORD}"]
      commands: ["SET", "HSET", "EXPIRE", "PUBLISH"]
      keys: ["dto:run:*", "dto:jobs:*", "dto:metrics:*"]
    - user: "dto-consumer"
      passwords: ["${DRAGONFLY_CONSUMER_PASSWORD}"]
      commands: ["GET", "HGETALL", "LRANGE", "SUBSCRIBE"]
      keys: ["dto:run:*", "dto:jobs:*", "dto:metrics:*", "dto:alerts:*"]
    - user: "dto-admin"
      passwords: ["${DRAGONFLY_ADMIN_PASSWORD}"]
      commands: ["*"]
      # NOTE(review): added for consistency with the other ACL users, which
      # all declare a key scope — confirm "*" matches the loader's schema.
      keys: ["*"]
# Backup and disaster recovery
backup:
  enabled: true
  dir: "/data/adaptai/platform/dataops/dto/cache/backups"
  interval: 3600  # 1 hour
  # Number of most recent backups to keep before rotation.
  retain_count: 24
# DTO-specific Lua scripts
# NOTE(review): these scripts build key names from ARGV instead of taking
# them via KEYS[] — with cluster.enabled: true that can break slot routing
# on strict cluster implementations. Confirm against the Dragonfly docs.
lua_scripts:
  # Atomically set a run's status and timestamp, refreshing the 1h TTL.
  update_run_status: |
    local run_id = ARGV[1]
    local status = ARGV[2]
    local timestamp = ARGV[3]
    redis.call('HSET', 'dto:run:' .. run_id .. ':status',
               'status', status,
               'updated_at', timestamp)
    redis.call('EXPIRE', 'dto:run:' .. run_id .. ':status', 3600)
    return redis.status_reply('OK')
  # Record an artifact path in a per-type set, refreshing the 24h TTL.
  add_artifact: |
    local run_id = ARGV[1]
    local artifact_type = ARGV[2]
    local artifact_path = ARGV[3]
    redis.call('SADD', 'dto:run:' .. run_id .. ':artifacts:' .. artifact_type, artifact_path)
    redis.call('EXPIRE', 'dto:run:' .. run_id .. ':artifacts:' .. artifact_type, 86400)
    return redis.status_reply('OK')