# adaptai/platform/dataops/dto/cache/dto_cache_client.py
# Uploaded by ADAPT-Chase via upload-large-folder tool (commit fd357f4, verified)
#!/usr/bin/env python3
"""
DTO Cache Client - Interface to Dragonfly for live status caching
"""
import json
import redis
from typing import Dict, Any, Optional, List
from datetime import datetime, timezone
class DTOCacheClient:
    """Interface to a Dragonfly (Redis-compatible) server for caching live
    DTO run state: status, progress metrics, artifact references, and alerts.

    The connection is created lazily on first use, so constructing this
    object performs no I/O. All write operations attach a TTL so stale
    entries expire on their own. Methods report failure via their return
    value (False / None / []) rather than raising, so callers can treat the
    cache as best-effort.
    """

    def __init__(self, host: str = "localhost", port: int = 18000,
                 password: Optional[str] = None):
        # Connection parameters only; the actual client is built in connect().
        self.host = host
        self.port = port
        self.password = password
        self.redis_client = None

    def connect(self) -> bool:
        """Connect to Dragonfly server.

        Returns True when a PING round-trip succeeds, False otherwise.
        """
        try:
            self.redis_client = redis.Redis(
                host=self.host,
                port=self.port,
                password=self.password,
                decode_responses=True,  # return str values instead of bytes
                socket_timeout=5,
                socket_connect_timeout=5
            )
            # Test connection
            self.redis_client.ping()
            print(f"βœ… Connected to Dragonfly at {self.host}:{self.port}")
            return True
        except redis.ConnectionError as e:
            print(f"❌ Failed to connect to Dragonfly: {e}")
            return False
        except Exception as e:
            print(f"❌ Error connecting to Dragonfly: {e}")
            return False

    def _ensure_connected(self) -> bool:
        """Lazily connect if needed; True when a client is available.

        Consolidates the connect-on-demand guard previously duplicated in
        every public method.
        """
        if self.redis_client:
            return True
        return self.connect()

    @staticmethod
    def _utc_timestamp() -> int:
        """Current UTC wall-clock time as an integer Unix timestamp."""
        return int(datetime.now(timezone.utc).timestamp())

    def update_run_status(self, run_id: str, status: str,
                          metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Update run status in cache.

        Stores a hash at ``dto:run:{run_id}:status`` with a 1-hour TTL.
        ``metadata`` entries are merged in and may override the base fields.
        """
        if not self._ensure_connected():
            return False
        try:
            key = f"dto:run:{run_id}:status"
            status_data = {
                "status": status,
                "updated_at": self._utc_timestamp(),
                "run_id": run_id
            }
            if metadata:
                status_data.update(metadata)
            # Store as hash with TTL
            self.redis_client.hset(key, mapping=status_data)
            self.redis_client.expire(key, 3600)  # 1 hour TTL
            print(f"βœ… Updated run status: {run_id} -> {status}")
            return True
        except Exception as e:
            print(f"❌ Error updating run status: {e}")
            return False

    def get_run_status(self, run_id: str) -> Optional[Dict[str, Any]]:
        """Get run status from cache.

        Returns the stored hash with known numeric fields converted back
        from strings (decode_responses yields str), or None when the key is
        missing, expired, or an error occurs.
        """
        if not self._ensure_connected():
            return None
        try:
            key = f"dto:run:{run_id}:status"
            status_data = self.redis_client.hgetall(key)
            if not status_data:
                return None
            # Convert numeric values
            if 'updated_at' in status_data:
                status_data['updated_at'] = int(status_data['updated_at'])
            if 'progress_percent' in status_data:
                status_data['progress_percent'] = float(status_data['progress_percent'])
            return status_data
        except Exception as e:
            print(f"❌ Error getting run status: {e}")
            return None

    def update_run_progress(self, run_id: str, progress_percent: float,
                            transferred_bytes: int, total_bytes: int,
                            throughput_mbps: float) -> bool:
        """Update run progress metrics.

        Stores a hash at ``dto:run:{run_id}:progress`` with a 1-hour TTL.
        """
        if not self._ensure_connected():
            return False
        try:
            key = f"dto:run:{run_id}:progress"
            progress_data = {
                "progress_percent": progress_percent,
                "transferred_bytes": transferred_bytes,
                "total_bytes": total_bytes,
                "throughput_mbps": throughput_mbps,
                "updated_at": self._utc_timestamp()
            }
            # Store as hash with TTL
            self.redis_client.hset(key, mapping=progress_data)
            self.redis_client.expire(key, 3600)  # 1 hour TTL
            print(f"βœ… Updated run progress: {run_id} -> {progress_percent}%")
            return True
        except Exception as e:
            print(f"❌ Error updating run progress: {e}")
            return False

    def add_artifact(self, run_id: str, artifact_type: str, artifact_path: str) -> bool:
        """Add artifact reference to cache.

        Appends ``artifact_path`` to the set at
        ``dto:run:{run_id}:artifacts:{artifact_type}`` with a 24-hour TTL.
        """
        if not self._ensure_connected():
            return False
        try:
            key = f"dto:run:{run_id}:artifacts:{artifact_type}"
            self.redis_client.sadd(key, artifact_path)
            self.redis_client.expire(key, 86400)  # 24 hours TTL
            print(f"βœ… Added artifact: {run_id} -> {artifact_type}: {artifact_path}")
            return True
        except Exception as e:
            print(f"❌ Error adding artifact: {e}")
            return False

    def get_artifacts(self, run_id: str, artifact_type: str) -> List[str]:
        """Get artifacts for a run.

        Returns the set members as a list; empty list when nothing is stored
        or on any error.
        """
        if not self._ensure_connected():
            return []
        try:
            key = f"dto:run:{run_id}:artifacts:{artifact_type}"
            artifacts = self.redis_client.smembers(key)
            return list(artifacts) if artifacts else []
        except Exception as e:
            print(f"❌ Error getting artifacts: {e}")
            return []

    def publish_alert(self, job_id: str, alert_type: str, message: str,
                      severity: str = "warning") -> bool:
        """Publish alert to cache.

        Stores the alert as a hash at ``dto:alerts:{job_id}`` (48-hour TTL)
        and also publishes it on the ``dto:alerts`` pub/sub channel for
        real-time consumers. NOTE: a new alert for the same job_id
        overwrites the previous one in the hash.
        """
        if not self._ensure_connected():
            return False
        try:
            key = f"dto:alerts:{job_id}"
            alert_data = {
                "type": alert_type,
                "message": message,
                "severity": severity,
                # Fix: was datetime.utcnow().timestamp(), which is deprecated
                # and interprets the naive datetime as *local* time, skewing
                # the timestamp on non-UTC hosts. Now consistent with the
                # timezone-aware timestamps used elsewhere in this class.
                "timestamp": self._utc_timestamp()
            }
            # Store as hash with TTL
            self.redis_client.hset(key, mapping=alert_data)
            self.redis_client.expire(key, 172800)  # 48 hours TTL
            # Also publish to pub/sub for real-time notifications
            self.redis_client.publish("dto:alerts", json.dumps(alert_data))
            print(f"βœ… Published alert: {job_id} -> {alert_type}: {message}")
            return True
        except Exception as e:
            print(f"❌ Error publishing alert: {e}")
            return False
# Test function
def test_cache_connectivity():
    """Test Dragonfly cache connectivity"""
    client = DTOCacheClient()

    # Guard clause: bail out early if we cannot reach the server.
    if not client.connect():
        print("❌ Cache connectivity test failed")
        return False

    run_id = "test-run-001"

    # Exercise every write path against the live cache.
    client.update_run_status(run_id, "IN_PROGRESS", {
        "manifest_path": "/test/manifest.yaml",
        "data_class": "CLASS_A"
    })
    client.update_run_progress(run_id, 25.5, 26843545600, 107374182400, 604.0)
    client.add_artifact(run_id, "logs", "/data/adaptai/platform/dataops/dto/logs/test-run-001.log")

    # Read everything back and show what the cache returned.
    status = client.get_run_status(run_id)
    print(f"Retrieved status: {status}")
    artifacts = client.get_artifacts(run_id, "logs")
    print(f"Retrieved artifacts: {artifacts}")

    # Finally exercise the alert path (hash + pub/sub).
    client.publish_alert("test-job-001", "SLO_BREACH", "Throughput below expected 500 Mbps", "critical")

    print("βœ… All cache operations completed successfully")
    return True
if __name__ == "__main__":
    # Script entry point: print a banner, then run the connectivity test.
    banner = "=" * 50
    print("Testing DTO Cache Client...")
    print(banner)
    test_cache_connectivity()