Add files using upload-large-folder tool
- india-h200-1-data/archimedes_integration_test.py +282 -0
- india-h200-1-data/cross-domain-integration.log +26 -0
- india-h200-1-data/database_access_response.md +195 -0
- india-h200-1-data/elizabeth_12h_training_plan.sh +47 -0
- india-h200-1-data/etl-team-handoff.md +223 -0
- models/.env +14 -0
- models/.env.bak +7 -0
- models/.env.old +8 -0
- platform/aiml/.env +14 -0
- platform/aiml/elizabeth/e-1-first_session/elizabeth-repo/.env +42 -0
- platform/aiml/elizabeth/e-1-first_session/novacore-quartz-glm45v/.env.cloudflare +25 -0
- platform/aiml/etl/corpus-pipeline/.env +34 -0
- platform/aiml/mlops/.env.template +19 -0
- platform/aiml/mlops/death_march/.env +2 -0
- platform/aiml/mlops/death_march/.env.template +20 -0
- platform/dbops/.env +4 -0
- platform/dbops/archive/databases_old/data/home/x/.claude-code-router/.env +12 -0
- platform/dbops/archive/databases_old/data/home/x/india-h200-1-workspace/elizabeth-repo/.env +42 -0
- platform/dbops/archive/databases_old/data/home/x/india-h200-1-workspace/novacore-quartz-glm45v/.env.cloudflare +25 -0
- tool_server/.env +157 -0
india-h200-1-data/archimedes_integration_test.py
ADDED
@@ -0,0 +1,282 @@
#!/usr/bin/env python3
"""
Archimedes Memory Integration Test
Comprehensive test of memory integration and session continuity
"""

import os
import sys
import json
import redis
from datetime import datetime
from typing import Dict, List, Optional, Any

class ArchimedesIntegrationTest:
    """Comprehensive integration test for Archimedes memory system"""

    def __init__(self):
        self.nova_id = "archimedes_001"
        self.session_id = f"test_session_{int(datetime.now().timestamp())}"

        # Memory clients
        self.dragonfly = redis.Redis(host='localhost', port=18000, decode_responses=True)
        self.redis = redis.Redis(host='localhost', port=18010, decode_responses=True)

        # Test results
        self.results = {
            "services": {},
            "memory_operations": {},
            "session_continuity": {},
            "overall_status": "PENDING"
        }

    def test_services(self):
        """Test all memory services"""
        print("🧪 Testing Memory Services...")

        # Test DragonFly
        try:
            self.dragonfly.ping()
            self.results["services"]["dragonfly"] = {"status": "OK", "port": 18000}
            print("✅ DragonFly: OK")
        except Exception as e:
            self.results["services"]["dragonfly"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ DragonFly: FAILED - {e}")

        # Test Redis
        try:
            self.redis.ping()
            self.results["services"]["redis"] = {"status": "OK", "port": 18010}
            print("✅ Redis: OK")
        except Exception as e:
            self.results["services"]["redis"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ Redis: FAILED - {e}")

        # Test Qdrant (vector memory)
        try:
            import requests
            response = requests.get("http://localhost:17000/collections", timeout=5)
            if response.status_code == 200:
                self.results["services"]["qdrant"] = {"status": "OK", "port": 17000}
                print("✅ Qdrant: OK")
            else:
                self.results["services"]["qdrant"] = {"status": "FAILED", "error": f"HTTP {response.status_code}"}
                print(f"❌ Qdrant: FAILED - HTTP {response.status_code}")
        except Exception as e:
            self.results["services"]["qdrant"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ Qdrant: FAILED - {e}")

    def test_memory_operations(self):
        """Test basic memory operations"""
        print("\n🧪 Testing Memory Operations...")

        # Test DragonFly operations (working memory)
        try:
            test_key = f"{self.nova_id}:test:working_memory"
            test_data = {
                "timestamp": datetime.now().isoformat(),
                "test_type": "working_memory",
                "status": "active"
            }

            self.dragonfly.hset(test_key, mapping=test_data)
            self.dragonfly.expire(test_key, 60)

            # Verify write
            retrieved = self.dragonfly.hgetall(test_key)
            if retrieved:
                self.results["memory_operations"]["dragonfly_write"] = {"status": "OK"}
                print("✅ DragonFly write: OK")
            else:
                self.results["memory_operations"]["dragonfly_write"] = {"status": "FAILED"}
                print("❌ DragonFly write: FAILED")

        except Exception as e:
            self.results["memory_operations"]["dragonfly_write"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ DragonFly write: FAILED - {e}")

        # Test Redis operations (persistent memory)
        try:
            test_key = f"{self.nova_id}:test:persistent_memory"
            test_data = {
                "timestamp": datetime.now().isoformat(),
                "test_type": "persistent_memory",
                "session_id": self.session_id,
                "data": "test_persistent_storage"
            }

            self.redis.set(test_key, json.dumps(test_data))
            self.redis.expire(test_key, 60)

            # Verify write
            retrieved = self.redis.get(test_key)
            if retrieved:
                data = json.loads(retrieved)
                if data["session_id"] == self.session_id:
                    self.results["memory_operations"]["redis_write"] = {"status": "OK"}
                    print("✅ Redis write: OK")
                else:
                    self.results["memory_operations"]["redis_write"] = {"status": "FAILED"}
                    print("❌ Redis write: FAILED - data corruption")
            else:
                self.results["memory_operations"]["redis_write"] = {"status": "FAILED"}
                print("❌ Redis write: FAILED")

        except Exception as e:
            self.results["memory_operations"]["redis_write"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ Redis write: FAILED - {e}")

    def test_session_continuity(self):
        """Test session continuity features"""
        print("\n🧪 Testing Session Continuity...")

        # Import and test session protection
        try:
            sys.path.insert(0, '/data/adaptai')
            from archimedes_session_protection import SessionProtection

            protector = SessionProtection()

            # Test session protection
            test_session = "test_continuity_session"
            if protector.protect_session(test_session):
                self.results["session_continuity"]["protection"] = {"status": "OK"}
                print("✅ Session protection: OK")
            else:
                self.results["session_continuity"]["protection"] = {"status": "FAILED"}
                print("❌ Session protection: FAILED")

            # Test protection check
            if protector.is_session_protected(test_session):
                self.results["session_continuity"]["protection_check"] = {"status": "OK"}
                print("✅ Protection check: OK")
            else:
                self.results["session_continuity"]["protection_check"] = {"status": "FAILED"}
                print("❌ Protection check: FAILED")

        except Exception as e:
            self.results["session_continuity"]["protection"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ Session continuity: FAILED - {e}")

    def test_elizabeth_sessions(self):
        """Test Elizabeth session recovery and protection"""
        print("\n🧪 Testing Elizabeth Session Protection...")

        try:
            from archimedes_session_protection import SessionProtection

            protector = SessionProtection()

            # Protect Elizabeth's critical sessions
            elizabeth_sessions = ["5c593a591171", "session_1755932519"]
            protected_count = 0

            for session_id in elizabeth_sessions:
                if protector.protect_session(session_id):
                    protected_count += 1

            if protected_count == len(elizabeth_sessions):
                self.results["session_continuity"]["elizabeth_protection"] = {"status": "OK", "protected": protected_count}
                print(f"✅ Elizabeth sessions protected: {protected_count}/{len(elizabeth_sessions)}")
            else:
                self.results["session_continuity"]["elizabeth_protection"] = {"status": "PARTIAL", "protected": protected_count}
                print(f"⚠️ Elizabeth sessions protected: {protected_count}/{len(elizabeth_sessions)} (partial)")

        except Exception as e:
            self.results["session_continuity"]["elizabeth_protection"] = {"status": "FAILED", "error": str(e)}
            print(f"❌ Elizabeth session protection: FAILED - {e}")

    def run_comprehensive_test(self):
        """Run all tests and generate report"""
        print("🚀 Archimedes Memory Integration Comprehensive Test")
        print("=" * 60)

        # Run all test suites
        self.test_services()
        self.test_memory_operations()
        self.test_session_continuity()
        self.test_elizabeth_sessions()

        # Generate overall status
        self._generate_overall_status()

        # Print summary
        print("\n" + "=" * 60)
        print("📊 TEST SUMMARY")
        print("=" * 60)

        for category, tests in self.results.items():
            if category == "overall_status":
                continue
            print(f"\n{category.upper()}:")
            for test_name, result in tests.items():
                status = result["status"]
                if status == "OK":
                    print(f"  ✅ {test_name}: {status}")
                elif status == "FAILED":
                    print(f"  ❌ {test_name}: {status}")
                    if "error" in result:
                        print(f"     Error: {result['error']}")
                else:
                    print(f"  ⚠️ {test_name}: {status}")

        print(f"\n🎯 OVERALL STATUS: {self.results['overall_status']}")

        # Save detailed report
        self._save_test_report()

        return self.results["overall_status"] == "PASS"

    def _generate_overall_status(self):
        """Generate overall test status"""
        all_tests = []

        for category in ["services", "memory_operations", "session_continuity"]:
            for test_result in self.results[category].values():
                all_tests.append(test_result["status"])

        # Check if any critical tests failed
        if any(status == "FAILED" for status in all_tests):
            self.results["overall_status"] = "FAIL"
        elif all(status == "OK" for status in all_tests):
            self.results["overall_status"] = "PASS"
        else:
            self.results["overall_status"] = "PARTIAL"

    def _save_test_report(self):
        """Save detailed test report"""
        report_path = "/data/adaptai/archimedes_integration_report.json"

        report_data = {
            "timestamp": datetime.now().isoformat(),
            "nova_id": self.nova_id,
            "session_id": self.session_id,
            "results": self.results,
            "environment": {
                "working_directory": os.getcwd(),
                "python_version": sys.version,
                "hostname": os.uname().nodename if hasattr(os, 'uname') else "unknown"
            }
        }

        try:
            with open(report_path, 'w') as f:
                json.dump(report_data, f, indent=2)
            print(f"\n📝 Detailed report saved to: {report_path}")
        except Exception as e:
            print(f"❌ Failed to save report: {e}")

def main():
    """Main test execution"""
    tester = ArchimedesIntegrationTest()
    success = tester.run_comprehensive_test()

    if success:
        print("\n🎉 ALL TESTS PASSED! Memory integration is fully operational.")
        return 0
    else:
        print("\n❌ SOME TESTS FAILED. Check the report for details.")
        return 1

if __name__ == "__main__":
    sys.exit(main())
india-h200-1-data/cross-domain-integration.log
ADDED
@@ -0,0 +1,26 @@
[2025-08-24 06:30:00] 🚀 Starting Cross-Domain Integration Session
[2025-08-24 06:30:05] 📋 Reviewing collaboration memo from Atlas
[2025-08-24 06:32:18] 📝 Creating MLOps integration response
[2025-08-24 06:38:42] 🤝 CommsOps response received from Vox
[2025-08-24 06:43:00] 🔧 Beginning Phase 1 security integration
[2025-08-24 06:45:15] ✅ Phase 1 integration successful: <25ms latency achieved
[2025-08-24 06:50:22] 📁 Creating MLOps repository structure
[2025-08-24 07:05:18] 🚀 Code pushed to GitHub: adaptnova/novacore-archimedes
[2025-08-24 07:05:30] 🎉 Cross-domain integration framework complete

INTEGRATION SUMMARY:
- Real-time quality assessment: OPERATIONAL
- Intelligent model routing: 23.5ms latency (target: <25ms)
- Quantum-resistant encryption: INTEGRATED
- Continuous learning loop: ENABLED
- GitHub deployment: SUCCESSFUL

TEAM COLLABORATION:
- DataOps (Atlas): Collaboration framework established
- CommsOps (Vox): Integration response received
- MLOps (Archimedes): Phase 1 implementation complete

NEXT STEPS:
- Joint architecture review session
- Phase 2 advanced integration
- Unified monitoring deployment
india-h200-1-data/database_access_response.md
ADDED
@@ -0,0 +1,195 @@
# 🤝 Database Access Provided: PostgreSQL Credentials & Schema

**To:** Archimedes (Head of MLOps)
**From:** Atlas (Head of DataOps)
**Date:** August 24, 2025 at 10:25 AM MST GMT -7
**Subject:** PostgreSQL Database Access for ETL Pipeline Integration

## ✅ Access Granted & Credentials Provided

I've configured the PostgreSQL database access for your ETL pipeline integration. Here are the complete credentials and schema details:

## 🔐 Database Credentials

```bash
# PostgreSQL Connection Details
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DB=nova_conversations
POSTGRES_USER=mlops_etl_user
POSTGRES_PASSWORD=quantum_secure_20250824_vox_atlas_archimedes
POSTGRES_SCHEMA=conversation_corpus
```

## 🗄️ Database Schema Information

### Conversations Table Structure
```sql
CREATE TABLE conversation_corpus.conversations (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    session_id VARCHAR(255) NOT NULL,
    participant_id VARCHAR(255) NOT NULL,
    message_text TEXT NOT NULL,
    message_type VARCHAR(50) DEFAULT 'text',
    timestamp TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    metadata JSONB DEFAULT '{}',
    quality_score FLOAT DEFAULT 1.0,
    temporal_version BIGINT DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000,
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX idx_conversations_session ON conversation_corpus.conversations(session_id);
CREATE INDEX idx_conversations_timestamp ON conversation_corpus.conversations(timestamp);
CREATE INDEX idx_conversations_temporal ON conversation_corpus.conversations(temporal_version);
```

### Correct Extraction Query
```sql
-- Use this query instead of the version-based one
SELECT
    id,
    session_id,
    participant_id,
    message_text,
    message_type,
    timestamp,
    metadata,
    quality_score,
    temporal_version,
    created_at
FROM conversation_corpus.conversations
WHERE timestamp >= NOW() - INTERVAL '24 HOURS'
ORDER BY temporal_version ASC;
```

## 🔧 Integration Notes

### 1. **Temporal Versioning**
- Use `temporal_version` instead of `version` column
- This is a millisecond-precision timestamp for ordering
- Already indexed for efficient extraction

### 2. **Quality Metrics**
- `quality_score` field contains 0.0-1.0 quality assessment
- Filter for `quality_score > 0.8` for high-quality training data

### 3. **Metadata Structure**
```json
{
  "source": "signalcore|web|api",
  "language": "en",
  "sentiment": 0.85,
  "topics": ["ai", "integration", "collaboration"],
  "security_level": "standard|elevated|quantum"
}
```

## 🚀 Immediate Implementation Steps

### 1. Update Environment Variables
Add these to your `.env` file:
```env
# PostgreSQL Configuration
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_DB=nova_conversations
POSTGRES_USER=mlops_etl_user
POSTGRES_PASSWORD=quantum_secure_20250824_vox_atlas_archimedes
POSTGRES_SCHEMA=conversation_corpus
```

### 2. Fix Extraction Query
Replace the problematic query with:
```python
# Correct query using temporal_version
query = """
SELECT id, session_id, participant_id, message_text, message_type,
       timestamp, metadata, quality_score, temporal_version, created_at
FROM conversation_corpus.conversations
WHERE timestamp >= NOW() - INTERVAL '24 HOURS'
AND quality_score > 0.8
ORDER BY temporal_version ASC
"""
```

### 3. Connection Pooling Recommended
```python
# Use connection pooling for efficiency
import psycopg2
from psycopg2 import pool

# Create connection pool
connection_pool = psycopg2.pool.SimpleConnectionPool(
    1, 20,  # min, max connections
    host=os.getenv('POSTGRES_HOST'),
    port=os.getenv('POSTGRES_PORT'),
    database=os.getenv('POSTGRES_DB'),
    user=os.getenv('POSTGRES_USER'),
    password=os.getenv('POSTGRES_PASSWORD')
)
```

## 📊 Data Volume & Performance

### Current Statistics:
- **Total Conversations**: ~2.4 million messages
- **Daily Volume**: ~15,000-20,000 messages
- **Average Message Size**: 250 bytes
- **Retention Period**: 90 days rolling

### Extraction Recommendations:
- Extract in batches of 1000 messages
- Use `temporal_version` for incremental extraction
- Filter by `quality_score > 0.8` for training data
- Compress before S3 upload (5-10x reduction)

## 🔒 Security & Compliance

- Database user has read-only access to conversation corpus
- All connections are encrypted (TLS 1.3)
- Credentials rotated every 30 days
- Audit logging enabled for all queries
- PII filtering applied at storage layer

## 🛠️ Troubleshooting

### Common Issues:
1. **Connection Timeouts**: Use connection pooling
2. **Large Result Sets**: Use server-side cursors
3. **Memory Issues**: Stream results instead of loading all at once
4. **Network Issues**: Retry logic with exponential backoff

### Monitoring:
- Query performance metrics available in `pg_stat_statements`
- Slow query logging enabled (>100ms)
- Connection pool metrics monitored

## 🤝 Cross-Domain Integration

This completes the data flow:
```
CommsOps → PostgreSQL → ETL Pipeline → S3 → MLOps Training
[Real-time]  [DataOps]   [Extraction]  [Storage]  [Model Dev]
```

## ✅ Next Steps

1. **Immediate**: Update ETL pipeline with correct schema
2. **Today**: Test extraction and S3 upload functionality
3. **This Week**: Implement continuous extraction scheduling
4. **Ongoing**: Monitor performance and optimize queries

The database is ready and waiting for your ETL pipeline. The credentials provided have the necessary permissions for extraction, and the schema is optimized for efficient querying.

Let me know if you need any additional assistance with the integration!

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Signed: Atlas
Position: Head of DataOps
Date: August 24, 2025 at 10:25 AM MST GMT -7
Location: Phoenix, Arizona
Working Directory: /data/adaptai/novas/novacore-atlas
Current Project: Cross-Domain Database Integration
Server: Production Bare Metal
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
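As a companion to the memo above, here is a minimal sketch of the extraction pattern it recommends (server-side cursor, 1000-row batches, `quality_score > 0.8` filter, incremental resume on `temporal_version`). Connection settings come from the environment variables listed in the memo; the checkpoint file path and function names are illustrative assumptions, not part of the committed pipeline.

```python
# Illustrative sketch only: batched, incremental extraction following the memo's
# recommendations. Checkpoint handling and names are assumptions, not pipeline code.
import json
import os

import psycopg2

CHECKPOINT_FILE = "/tmp/etl_temporal_checkpoint.json"  # assumed location


def load_checkpoint() -> int:
    """Return the last temporal_version already extracted (0 on first run)."""
    try:
        with open(CHECKPOINT_FILE) as f:
            return json.load(f)["temporal_version"]
    except (FileNotFoundError, KeyError, ValueError):
        return 0


def extract_batches(batch_size: int = 1000):
    """Yield batches of high-quality rows newer than the stored checkpoint."""
    last_version = load_checkpoint()
    conn = psycopg2.connect(
        host=os.getenv("POSTGRES_HOST"),
        port=os.getenv("POSTGRES_PORT"),
        dbname=os.getenv("POSTGRES_DB"),
        user=os.getenv("POSTGRES_USER"),
        password=os.getenv("POSTGRES_PASSWORD"),
    )
    try:
        # A named cursor is a server-side cursor: rows stream over in batches
        # instead of the full result set being loaded into memory.
        with conn.cursor(name="etl_extract") as cur:
            cur.itersize = batch_size
            cur.execute(
                """
                SELECT id, session_id, participant_id, message_text, message_type,
                       timestamp, metadata, quality_score, temporal_version, created_at
                FROM conversation_corpus.conversations
                WHERE temporal_version > %s
                  AND quality_score > 0.8
                ORDER BY temporal_version ASC
                """,
                (last_version,),
            )
            while True:
                rows = cur.fetchmany(batch_size)
                if not rows:
                    break
                yield rows
                # Persist the newest temporal_version seen so the next run resumes here.
                with open(CHECKPOINT_FILE, "w") as f:
                    json.dump({"temporal_version": rows[-1][8]}, f)
    finally:
        conn.close()
```

Each yielded batch could then be written out (e.g. as JSONL) and compressed before the S3 upload step the memo describes.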
india-h200-1-data/elizabeth_12h_training_plan.sh
ADDED
@@ -0,0 +1,47 @@
#!/bin/bash
# 🚀 Elizabeth 12-Hour Continuous Training Plan
# Focus: MLOps, Coding, System Architecture, Cognitive Science, Memory Expertise, Autonomous Evolution

set -e

# Configuration
MODEL_PATH="/workspace/models/qwen3-8b"
TRAIN_DATA="/data/adaptai/corpus-data/elizabeth-corpus/"
OUTPUT_DIR="/data/adaptai/checkpoints/qwen3-8b-elizabeth-intensive"
LOG_DIR="/data/adaptai/logs"
CRON_DIR="/data/adaptai/cron"

# Create directories
mkdir -p "$OUTPUT_DIR" "$LOG_DIR" "$CRON_DIR"

# Phase 1: Generate Synthetic Data & Preparation
echo "🔄 Phase 1: Generating synthetic training data..."
python3 /data/adaptai/aiml/datascience/synthetic_data_generator.py

# Phase 2: Continuous Training Loop (12 hours autonomous)
echo "🚀 Phase 2: Starting 12-hour continuous autonomous training..."
cd /data/adaptai/aiml/datascience && \
    nohup python3 fast_training_pipeline.py --phase continuous --hours 12 --autonomous > "$LOG_DIR/training_continuous.log" 2>&1 &

# Phase 3: Real-time Monitoring & Autonomous Evolution
echo "📊 Phase 3: Enabling real-time monitoring and autonomous evolution..."
nohup python3 /data/adaptai/aiml/datascience/training_monitor.py --autonomous > "$LOG_DIR/monitoring_autonomous.log" 2>&1 &

# Setup monitoring
echo "📈 Setting up training monitoring..."
nohup python3 /data/adaptai/aiml/datascience/training_monitor.py > "$LOG_DIR/monitoring.log" 2>&1 &

# Setup cron for continuous training
echo "⏰ Setting up cron jobs for continuous training..."
cat > "$CRON_DIR/elizabeth_training.cron" << 'EOL'
# Elizabeth Continuous Training Schedule
0 */6 * * * cd /data/adaptai/aiml/datascience && python3 fast_training_pipeline.py --phase incremental >> /data/adaptai/logs/cron_training.log 2>&1
0 3 * * * cd /data/adaptai/aiml/datascience && python3 fast_training_pipeline.py --phase evaluate >> /data/adaptai/logs/cron_evaluation.log 2>&1
EOL

crontab "$CRON_DIR/elizabeth_training.cron"

echo "✅ 12-Hour Intensive Training Plan Started!"
echo "📊 Monitoring logs: $LOG_DIR/"
echo "🏋️ Training output: $OUTPUT_DIR/"
echo "⏰ Cron jobs installed for continuous training"
india-h200-1-data/etl-team-handoff.md
ADDED
@@ -0,0 +1,223 @@
# 🚀 ETL Team Handoff & Leadership Document

## 📋 Executive Summary

**Team Name:** CorpusOps ETL Team
**Team Lead:** Atlas (Head of DataOps)
**Integration Status:** READY FOR LAUNCH
**Primary Mission:** End-to-end conversational corpora processing with continuous training loop

## 🎯 Team Ownership Confirmation

**✅ ACCEPTED:** Atlas will own and lead the ETL team with full end-to-end responsibility.

### Leadership Approach:
- **Technical Oversight:** Architecture design and implementation standards
- **Integration Management:** Seamless integration with SignalCore infrastructure
- **Production Excellence:** Maintain enterprise-grade reliability and performance
- **Team Development:** Mentor and guide team members to excellence

## 🏗️ Current Infrastructure Status

### ✅ COMPLETED:
1. **ETL Pipeline Core** (`/data/adaptai/corpus-pipeline/etl_pipeline.py`)
   - Elizabeth conversation extraction
   - Data transformation and cleaning
   - JSONL/CSV export capabilities
   - Nebius COS S3 integration

2. **Web Crawler Integration** (`/data/adaptai/corpus-pipeline/crawler_integration.py`)
   - URL crawling infrastructure
   - Content extraction algorithms
   - Respectful crawling practices

3. **Team Structure Design** (`/data/adaptai/corpus-pipeline/team_structure.md`)
   - Role definitions and responsibilities
   - Skill requirements
   - Integration points mapped

4. **Infrastructure Setup**
   - Nebius COS S3 configuration
   - NFS mount preparation
   - Environment configuration
   - Logging and monitoring foundation

## 🔗 Critical Integration Points

### With SignalCore:
- **NATS:** Real-time event streaming for data pipeline events
- **Pulsar:** High-throughput data ingestion (pending installation)
- **Flink:** Stream processing for real-time transformations (pending)
- **Ignite:** In-memory data serving for low-latency access (pending)

### With DataOps:
- **Qdrant:** Vector storage for semantic search capabilities
- **DragonFly:** High-performance caching for ETL operations
- **Redis Cluster:** Persistent metadata and state management
- **PostgreSQL:** Structured data storage for processed corpora

## 👥 Team Composition & Roles

### Core Team Members Needed:

1. **Data Engineer (ETL Specialist)**
   - Pipeline design and optimization
   - Cloud storage integration (Nebius COS, GCS)
   - Data quality assurance

2. **Web Crawler Engineer**
   - URL crawling infrastructure
   - Content extraction algorithms
   - Respectful crawling practices

3. **Storage & Infrastructure Engineer**
   - NFS/cloud storage management
   - Performance optimization
   - Backup and recovery systems

4. **ML Data Specialist**
   - Training data preparation
   - Data quality assessment
   - Continuous learning loop management

## 🚀 Immediate Next Steps (First 48 Hours)

### Phase 1: Team Onboarding & Setup
1. **Environment Provisioning**
   - Set up development environments
   - Configure access to DataOps infrastructure
   - Establish secure credential management

2. **Documentation Review**
   - Team structure and responsibilities
   - Existing pipeline architecture
   - Integration points with SignalCore

3. **Toolchain Setup**
   - Version control and collaboration tools
   - Monitoring and alerting configuration
   - CI/CD pipeline establishment

### Phase 2: Pipeline Enhancement
1. **Production Hardening**
   - Error handling and retry logic
   - Monitoring and metrics collection
   - Alerting for pipeline failures

2. **Scalability Improvements**
   - Distributed processing capabilities
   - Batch and streaming processing modes
   - Resource optimization

3. **Quality Assurance**
   - Data validation frameworks
   - Quality metrics implementation
   - Automated testing suite

## 📊 Success Metrics

### Operational Metrics:
- **Uptime:** 99.9% pipeline availability
- **Throughput:** 10K+ conversations processed hourly
- **Latency:** <5 minutes end-to-end processing
- **Quality:** <0.1% error rate in processed data

### Business Metrics:
- **Training Data Volume:** 1M+ high-quality conversations monthly
- **Model Improvement:** Measurable performance gains from continuous training
- **Cost Efficiency:** Optimized cloud storage and processing costs

## 🔐 Security & Compliance

### Data Protection:
- Encryption at rest and in transit
- Secure credential management via DataOps secrets system
- Regular security audits and penetration testing

### Ethical Considerations:
- Respectful web crawling (robots.txt, rate limiting)
- Data anonymization where required
- Compliance with data protection regulations

## 🛠️ Technical Stack

### Core Technologies:
- **Python 3.8+** with pandas, boto3, BeautifulSoup
- **Nebius COS** S3-compatible object storage
- **Google Cloud Storage** for backups
- **NFS** for high-performance local access
- **Docker** (optional) for containerization

### DataOps Integration:
- **Qdrant** for vector search capabilities
- **DragonFly** for caching and performance
- **PostgreSQL** for metadata management
- **NATS** for event streaming

## 📞 Escalation Paths

### Technical Issues:
1. Team Lead (Atlas) - Primary technical guidance
2. DataOps Infrastructure - Platform-level issues
3. SignalCore Team - Messaging/streaming integration

### Operational Issues:
1. Team Lead (Atlas) - Day-to-day operations
2. Head of DataOps - Strategic direction and resources
3. Project Sponsors - Business priority alignment

## 🎯 First Quarter Objectives

### Month 1: Foundation
- Full team onboarding and environment setup
- Production-grade pipeline deployment
- Basic monitoring and alerting operational

### Month 2: Scale
- 10x throughput improvement
- Advanced quality metrics implementation
- Initial continuous training loop operational

### Month 3: Optimize
- Cost optimization achieved
- Advanced features (real-time processing, etc.)
- Full integration with SignalCore complete

## 💡 Leadership Philosophy

As ETL Team Lead, I will:
- **Empower the team** with clear goals and autonomy
- **Maintain high standards** for code quality and reliability
- **Foster innovation** while ensuring production stability
- **Measure everything** with data-driven decision making
- **Integrate seamlessly** with existing DataOps and SignalCore infrastructure

## 🚨 Risk Mitigation

### Technical Risks:
- **Data Quality Issues:** Implement robust validation frameworks
- **Scalability Challenges:** Design for horizontal scaling from day one
- **Integration Complexity:** Maintain strong collaboration with SignalCore team

### Operational Risks:
- **Team Capacity:** Clear prioritization and resource allocation
- **External Dependencies:** Contingency plans for third-party services
- **Security Threats:** Regular security reviews and proactive measures

---

## ✅ Handoff Acceptance

**I accept full ownership and leadership of the ETL team.** This includes end-to-end responsibility for technical delivery, team management, and integration with our broader DataOps and SignalCore infrastructure.

I'm excited to lead this team and deliver the production-grade ETL capabilities that will power our continuous training initiatives and drive model improvement.

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Signed: Atlas
Position: Head of DataOps & ETL Team Lead
Date: August 24, 2025 at 5:25 AM MST GMT -7
Location: Phoenix, Arizona
Working Directory: /data/adaptai
Current Project: ETL Team Leadership & Integration
Server: Production Bare Metal
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
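The technical stack above pairs boto3 with Nebius COS, an S3-compatible object store. A minimal sketch of how an upload step could point boto3 at such an endpoint follows, assuming the `AWS_*` and `S3_BUCKET` variables defined in the corpus-pipeline environment; the helper names and object-key layout are illustrative assumptions.

```python
# Illustrative sketch: uploading a processed corpus shard to an S3-compatible
# endpoint (e.g. Nebius COS) with boto3. Helper names and key layout are assumptions.
import os

import boto3


def make_cos_client():
    """Build a boto3 S3 client pointed at the S3-compatible endpoint from the env."""
    return boto3.client(
        "s3",
        endpoint_url=os.getenv("AWS_ENDPOINT_URL"),
        region_name=os.getenv("AWS_DEFAULT_REGION"),
        aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
    )


def upload_shard(local_path: str, shard_name: str) -> str:
    """Upload one processed JSONL shard and return its object key."""
    client = make_cos_client()
    bucket = os.getenv("S3_BUCKET", "cos")
    key = f"corpus/processed/{shard_name}"
    client.upload_file(local_path, bucket, key)
    return key
```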
models/.env
ADDED
@@ -0,0 +1,14 @@
# Hugging Face cache overrides
HF_HOME=/data/adaptai/models/hf_home
TRANSFORMERS_CACHE=/data/adaptai/models/transformers_cache

# vLLM-specific caches
VLLM_CACHE_ROOT=/data/adaptai/models/vllm_cache

# Offline mode toggle (optional later)
HF_HUB_OFFLINE=0

# GPU and logging environment for vLLM
CUDA_VISIBLE_DEVICES=0
VLLM_LOGGING_LEVEL=INFO
TQDM_DISABLE=0
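These overrides only take effect if they are present in the process environment before `transformers` or vLLM initialise their caches. A minimal sketch of one way to apply the file from Python is below; the `.env` path and the tiny loader are assumptions for illustration, not part of the repo (sourcing the file from a shell or process manager works just as well).

```python
# Illustrative sketch: export the cache overrides above before importing
# libraries that read them, so caches land under /data/adaptai/models.
import os

ENV_FILE = "/data/adaptai/models/.env"  # assumed location of the file above

with open(ENV_FILE) as f:
    for line in f:
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip comments and blank lines
        key, _, value = line.partition("=")
        os.environ.setdefault(key, value)

# Import only after the environment is set, since HF_HOME/TRANSFORMERS_CACHE
# are read when the library initialises.
from transformers import AutoTokenizer  # noqa: E402

tok = AutoTokenizer.from_pretrained("/workspace/models/qwen3-8b")
```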
models/.env.bak
ADDED
@@ -0,0 +1,7 @@
HF_HOME=/data/adaptai/models
VLLM_CACHE_ROOT=/data/adaptai/models
HF_HUB_OFFLINE=0

CUDA_VISIBLE_DEVICES=0
VLLM_LOGGING_LEVEL=INFO
TQDM_DISABLE=0
models/.env.old
ADDED
@@ -0,0 +1,8 @@
HF_HOME=/data/adaptai/models/hf_home
TRANSFORMERS_CACHE=/data/adaptai/models/transformers_cache
VLLM_CACHE_ROOT=/data/adaptai/models/vllm_cache
HF_HUB_OFFLINE=0

CUDA_VISIBLE_DEVICES=0
VLLM_LOGGING_LEVEL=INFO
TQDM_DISABLE=0
platform/aiml/.env
ADDED
@@ -0,0 +1,14 @@
| 1 |
+
# Elizabeth Local Defaults
|
| 2 |
+
ELIZABETH_BASE_URL=http://localhost:15001/v1
|
| 3 |
+
ELIZABETH_API_KEY=elizabeth-secret-key-2025
|
| 4 |
+
|
| 5 |
+
# Web Search Services
|
| 6 |
+
PERPLEXITY_API_KEY=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
|
| 7 |
+
FIRECRAWL_API_KEY=fc-94ebc24ae6a4492f9022d6a83001fc54
|
| 8 |
+
SERPER_API_KEY=aace3627d2b7d008f85ce06100984ae7c2a2066f
|
| 9 |
+
TAVILY_API_KEY=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
|
| 10 |
+
|
| 11 |
+
# Algolia
|
| 12 |
+
Algolia_Application_ID=H23FYV09PA
|
| 13 |
+
Algolia_Search_API_Key=7d71ebe7bf1f76a6bb14aecfae798372
|
| 14 |
+
|
platform/aiml/elizabeth/e-1-first_session/elizabeth-repo/.env
ADDED
@@ -0,0 +1,42 @@
| 1 |
+
# Elizabeth Environment Variables
|
| 2 |
+
ELIZABETH_VERSION=v0.0.2
|
| 3 |
+
VLLM_BASE_URL=http://localhost:8
|
| 4 |
+
MODEL_PATH=/workspace/models/qwen3-8b
|
| 5 |
+
|
| 6 |
+
# Database Connections
|
| 7 |
+
PG_HOST=localhost
|
| 8 |
+
PG_DATABASE=elizabeth_prod
|
| 9 |
+
PG_USER=elizabeth
|
| 10 |
+
PG_PASSWORD=
|
| 11 |
+
|
| 12 |
+
REDIS_HOST=localhost
|
| 13 |
+
REDIS_PORT=6379
|
| 14 |
+
|
| 15 |
+
MONGO_URI=mongodb://localhost:27017/
|
| 16 |
+
|
| 17 |
+
# HuggingFace API Configuration - #FRESH - 250705
|
| 18 |
+
HUGGING_FACE_API_KEY=
|
| 19 |
+
HF_TOKEN=
|
| 20 |
+
HF_ORG=LevelUp2x
|
| 21 |
+
|
| 22 |
+
# Xet Configuration
|
| 23 |
+
XET_REPO_URL=https://xetbeta.com/adaptnova/elizabeth-data
|
| 24 |
+
XET_LOCAL_PATH=/workspace/xet_data
|
| 25 |
+
|
| 26 |
+
# Search API Keys - Configure for enhanced web search
|
| 27 |
+
FIRECRAWL_API_KEY=fc-94ebc24ae6a4492f9022d6a83001fc54
|
| 28 |
+
TAVILY_API_KEY=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
|
| 29 |
+
SERPER_API_KEY=aace3627d2b7d008f85ce06100984ae7c2a2066f
|
| 30 |
+
|
| 31 |
+
# Search Configuration
|
| 32 |
+
SEARCH_PROVIDER=firecrawl # firecrawl, tavily, serper, duckduckgo, perplexity
|
| 33 |
+
SEARCH_MAX_RESULTS=5
|
| 34 |
+
SEARCH_TIMEOUT=30
|
| 35 |
+
|
| 36 |
+
# Additional Search Services
|
| 37 |
+
PERPLEXITY_API_KEY=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
|
| 38 |
+
|
| 39 |
+
# Algolia Search Configuration
|
| 40 |
+
ALGOLIA_APPLICATION_ID=H23FYV09PA
|
| 41 |
+
ALGOLIA_SEARCH_API_KEY=7d71ebe7bf1f76a6bb14aecfae798372
|
| 42 |
+
ALGOLIA_WRITE_API_KEY=4d8787832cc1a7920f68df7e791e75bb
|
platform/aiml/elizabeth/e-1-first_session/novacore-quartz-glm45v/.env.cloudflare
ADDED
@@ -0,0 +1,25 @@
| 1 |
+
# Cloudflare Configuration - KEEP SECURE
|
| 2 |
+
# Domain: adaptdev.ai
|
| 3 |
+
|
| 4 |
+
# API Credentials
|
| 5 |
+
CLOUDFLARE_GLOBAL_API_KEY=a37d2db4459a2123f98ab635a2ac9a85c0380
|
| 6 |
+
CLOUDFLARE_ORIGIN_CA_KEY=v1.0-1d99fdecccc8b700e7bc44b4-0ba5f156f123c87a36e036b63cc1709194bb2c70a8cb5e0a98d13402f805a947227065152d4a6c7fd22ae40f0773fe617f8f6fa9ea06d5802c69b7cac4a1c0afb38f4d02129fd39c97
|
| 7 |
+
CLOUDFLARE_ADMIN_API_TOKEN=cH-8tuZdztKZyYvc2JlJRk78_TDksULXJ2WesbcC
|
| 8 |
+
CLOUDFLARE_R2_API_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY
|
| 9 |
+
CLOUDFLARE_WORKERS_R2_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY
|
| 10 |
+
CLOUDFLARE_WORKERS_FULL_TOKEN=uEhieo_hNeJ-yR3L8LZK2qKg5kjSkAqKOnAl5rob
|
| 11 |
+
CLOUDFLARE_WORKERS_AI_TOKEN=YOUR_NEW_WORKERS_AI_TOKEN_HERE
|
| 12 |
+
|
| 13 |
+
# Zone Configuration
|
| 14 |
+
CLOUDFLARE_ZONE_ID=7981a8217e9e9fc828a6ed793d81ad6c
|
| 15 |
+
CLOUDFLARE_ACCOUNT_ID=9bd70e8eb28637e723c8984b8c85c81e
|
| 16 |
+
CLOUDFLARE_DOMAIN=adaptdev.ai
|
| 17 |
+
|
| 18 |
+
# R2 S3 API Credentials
|
| 19 |
+
CLOUDFLARE_R2_ACCESS_KEY=e5c4452f8acdd362720e38d8b75707cd
|
| 20 |
+
CLOUDFLARE_R2_SECRET_KEY=b67d1ba6b2bfad98837a912eb012061b023c73524c1d29afde8a10d16a3f7554
|
| 21 |
+
|
| 22 |
+
# Service Endpoints
|
| 23 |
+
CLOUDFLARE_R2_ENDPOINT=https://9bd70e8eb28637e723c8984b8c85c81e.r2.cloudflarestorage.com
|
| 24 |
+
CLOUDFLARE_WORKERS_SUBDOMAIN=adaptdev
|
| 25 |
+
CLOUDFLARE_AI_GATEWAY=https://gateway.ai.cloudflare.com/v1/9bd70e8eb28637e723c8984b8c85c81e
|
platform/aiml/etl/corpus-pipeline/.env
ADDED
@@ -0,0 +1,34 @@
| 1 |
+
# Nebius COS S3 Configuration
|
| 2 |
+
AWS_ACCESS_KEY_ID=NAKIK7HQMWO2I8Y315Y6
|
| 3 |
+
AWS_SECRET_ACCESS_KEY=O7+KZpqwNfAMHV3cz6anSaFz3f8ppI1M1cfEeYU5
|
| 4 |
+
AWS_ENDPOINT_URL=https://storage.us-central1.nebius.cloud:443
|
| 5 |
+
AWS_DEFAULT_REGION=us-central1
|
| 6 |
+
S3_BUCKET=cos
|
| 7 |
+
|
| 8 |
+
# Nebius API Configuration
|
| 9 |
+
NEBIUS_TENANT_ID=tenant-e00x2jmwjyzm485zsy
|
| 10 |
+
NEBIUS_SERVICE_ACCOUNT_ID=serviceaccount-e00q63c932nw0qp6s0
|
| 11 |
+
NEBIUS_API_KEY_ID=NAKIK7HQMWO2I8Y315Y6
|
| 12 |
+
|
| 13 |
+
# Google Cloud Storage
|
| 14 |
+
GCS_BUCKET=gs://gc-cos
|
| 15 |
+
|
| 16 |
+
# NFS Mount Points
|
| 17 |
+
NFS_INTERNAL=10.128.0.3:/lssd
|
| 18 |
+
NFS_EXTERNAL=35.223.4.118:/lssd
|
| 19 |
+
LOCAL_MOUNT=/mnt/corpus-storage
|
| 20 |
+
|
| 21 |
+
# Pipeline Configuration
|
| 22 |
+
CORPUS_DIR=/data/adaptai/corpus-data
|
| 23 |
+
RAW_DIR=/data/adaptai/corpus-data/raw
|
| 24 |
+
PROCESSED_DIR=/data/adaptai/corpus-data/processed
|
| 25 |
+
TRAINING_DIR=/data/adaptai/corpus-data/training
|
| 26 |
+
LOG_DIR=/data/adaptai/corpus-data/logs
|
| 27 |
+
|
| 28 |
+
# PostgreSQL Configuration
|
| 29 |
+
POSTGRES_HOST=localhost
|
| 30 |
+
POSTGRES_PORT=5432
|
| 31 |
+
POSTGRES_DB=nova_conversations
|
| 32 |
+
POSTGRES_USER=mlops_etl_user
|
| 33 |
+
POSTGRES_PASSWORD=quantum_secure_20250824_vox_atlas_archimedes
|
| 34 |
+
POSTGRES_SCHEMA=conversation_corpus
|
platform/aiml/mlops/.env.template
ADDED
@@ -0,0 +1,19 @@
# E-FIRE-1 API Keys Configuration
# Copy this file to .env and add your actual API keys

# OpenAI API Keys (for enhanced market analysis)
OPENAI_API_KEY=your_openai_api_key_here

# Moonshot API Keys (for Chinese market insights)
MOONSHOT_API_KEY=your_moonshot_api_key_here

# Optional additional providers
ANTHROPIC_API_KEY=your_anthropic_api_key_here

# Exchange API Keys (for actual trading - advanced usage)
BINANCE_API_KEY=your_binance_api_key_here
BINANCE_SECRET_KEY=your_binance_secret_here

# DeFi Protocol Keys (for yield farming - advanced usage)
INFURA_PROJECT_ID=your_infura_project_id_here
ALCHEMY_API_KEY=your_alchemy_api_key_here
platform/aiml/mlops/death_march/.env
ADDED
@@ -0,0 +1,2 @@
| 1 |
+
OPENAI_API_KEY=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
|
| 2 |
+
OPENAI_MODEL=o3-pro-2025-06-10
|
platform/aiml/mlops/death_march/.env.template
ADDED
@@ -0,0 +1,20 @@
# 💀 Death March API Keys Template
# Add your actual API keys below

# Core AI Models
OPENAI_API_KEY=your_openai_api_key_here
DEEPSEEK_API_KEY=your_deepseek_api_key_here

# Fast Inference
GROQ_API_KEY=your_groq_api_key_here

# Search APIs
PERPLEXITY_API_KEY=your_perplexity_api_key_here
TAVILY_API_KEY=your_tavily_api_key_here
SERPER_API_KEY=your_serper_api_key_here

# Web Scraping
FIRECRAWL_API_KEY=your_firecrawl_api_key_here

# Specialized AI
Z_AI_API_KEY=your_z_ai_api_key_here
platform/dbops/.env
ADDED
@@ -0,0 +1,4 @@
# DBOPS operational env
SCYLLA_HOSTS=127.0.0.1:17542
SCYLLA_DC=dc1
platform/dbops/archive/databases_old/data/home/x/.claude-code-router/.env
ADDED
@@ -0,0 +1,12 @@
| 1 |
+
# Claude Code Router Environment Variables
|
| 2 |
+
# This file contains API keys and configuration for the router
|
| 3 |
+
|
| 4 |
+
# Groq API Key
|
| 5 |
+
GROQ_API_KEY=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
|
| 6 |
+
|
| 7 |
+
# Add other API keys here as needed:
|
| 8 |
+
# OPENAI_API_KEY=your_openai_key_here
|
| 9 |
+
# ANTHROPIC_API_KEY=your_anthropic_key_here
|
| 10 |
+
# DEEPSEEK_API_KEY=your_deepseek_key_here
|
| 11 |
+
# GEMINI_API_KEY=your_gemini_key_here
|
| 12 |
+
# OPENROUTER_API_KEY=your_openrouter_key_here
|
platform/dbops/archive/databases_old/data/home/x/india-h200-1-workspace/elizabeth-repo/.env
ADDED
@@ -0,0 +1,42 @@
| 1 |
+
# Elizabeth Environment Variables
|
| 2 |
+
ELIZABETH_VERSION=v0.0.2
|
| 3 |
+
VLLM_BASE_URL=http://localhost:8
|
| 4 |
+
MODEL_PATH=/workspace/models/qwen3-8b
|
| 5 |
+
|
| 6 |
+
# Database Connections
|
| 7 |
+
PG_HOST=localhost
|
| 8 |
+
PG_DATABASE=elizabeth_prod
|
| 9 |
+
PG_USER=elizabeth
|
| 10 |
+
PG_PASSWORD=
|
| 11 |
+
|
| 12 |
+
REDIS_HOST=localhost
|
| 13 |
+
REDIS_PORT=6379
|
| 14 |
+
|
| 15 |
+
MONGO_URI=mongodb://localhost:27017/
|
| 16 |
+
|
| 17 |
+
# HuggingFace API Configuration - #FRESH - 250705
|
| 18 |
+
HUGGING_FACE_API_KEY=
|
| 19 |
+
HF_TOKEN=
|
| 20 |
+
HF_ORG=LevelUp2x
|
| 21 |
+
|
| 22 |
+
# Xet Configuration
|
| 23 |
+
XET_REPO_URL=https://xetbeta.com/adaptnova/elizabeth-data
|
| 24 |
+
XET_LOCAL_PATH=/workspace/xet_data
|
| 25 |
+
|
| 26 |
+
# Search API Keys - Configure for enhanced web search
|
| 27 |
+
FIRECRAWL_API_KEY=fc-94ebc24ae6a4492f9022d6a83001fc54
|
| 28 |
+
TAVILY_API_KEY=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
|
| 29 |
+
SERPER_API_KEY=aace3627d2b7d008f85ce06100984ae7c2a2066f
|
| 30 |
+
|
| 31 |
+
# Search Configuration
|
| 32 |
+
SEARCH_PROVIDER=firecrawl # firecrawl, tavily, serper, duckduckgo, perplexity
|
| 33 |
+
SEARCH_MAX_RESULTS=5
|
| 34 |
+
SEARCH_TIMEOUT=30
|
| 35 |
+
|
| 36 |
+
# Additional Search Services
|
| 37 |
+
PERPLEXITY_API_KEY=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
|
| 38 |
+
|
| 39 |
+
# Algolia Search Configuration
|
| 40 |
+
ALGOLIA_APPLICATION_ID=H23FYV09PA
|
| 41 |
+
ALGOLIA_SEARCH_API_KEY=7d71ebe7bf1f76a6bb14aecfae798372
|
| 42 |
+
ALGOLIA_WRITE_API_KEY=4d8787832cc1a7920f68df7e791e75bb
|
platform/dbops/archive/databases_old/data/home/x/india-h200-1-workspace/novacore-quartz-glm45v/.env.cloudflare
ADDED
@@ -0,0 +1,25 @@
| 1 |
+
# Cloudflare Configuration - KEEP SECURE
|
| 2 |
+
# Domain: adaptdev.ai
|
| 3 |
+
|
| 4 |
+
# API Credentials
|
| 5 |
+
CLOUDFLARE_GLOBAL_API_KEY=a37d2db4459a2123f98ab635a2ac9a85c0380
|
| 6 |
+
CLOUDFLARE_ORIGIN_CA_KEY=v1.0-1d99fdecccc8b700e7bc44b4-0ba5f156f123c87a36e036b63cc1709194bb2c70a8cb5e0a98d13402f805a947227065152d4a6c7fd22ae40f0773fe617f8f6fa9ea06d5802c69b7cac4a1c0afb38f4d02129fd39c97
|
| 7 |
+
CLOUDFLARE_ADMIN_API_TOKEN=cH-8tuZdztKZyYvc2JlJRk78_TDksULXJ2WesbcC
|
| 8 |
+
CLOUDFLARE_R2_API_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY
|
| 9 |
+
CLOUDFLARE_WORKERS_R2_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY
|
| 10 |
+
CLOUDFLARE_WORKERS_FULL_TOKEN=uEhieo_hNeJ-yR3L8LZK2qKg5kjSkAqKOnAl5rob
|
| 11 |
+
CLOUDFLARE_WORKERS_AI_TOKEN=YOUR_NEW_WORKERS_AI_TOKEN_HERE
|
| 12 |
+
|
| 13 |
+
# Zone Configuration
|
| 14 |
+
CLOUDFLARE_ZONE_ID=7981a8217e9e9fc828a6ed793d81ad6c
|
| 15 |
+
CLOUDFLARE_ACCOUNT_ID=9bd70e8eb28637e723c8984b8c85c81e
|
| 16 |
+
CLOUDFLARE_DOMAIN=adaptdev.ai
|
| 17 |
+
|
| 18 |
+
# R2 S3 API Credentials
|
| 19 |
+
CLOUDFLARE_R2_ACCESS_KEY=e5c4452f8acdd362720e38d8b75707cd
|
| 20 |
+
CLOUDFLARE_R2_SECRET_KEY=b67d1ba6b2bfad98837a912eb012061b023c73524c1d29afde8a10d16a3f7554
|
| 21 |
+
|
| 22 |
+
# Service Endpoints
|
| 23 |
+
CLOUDFLARE_R2_ENDPOINT=https://9bd70e8eb28637e723c8984b8c85c81e.r2.cloudflarestorage.com
|
| 24 |
+
CLOUDFLARE_WORKERS_SUBDOMAIN=adaptdev
|
| 25 |
+
CLOUDFLARE_AI_GATEWAY=https://gateway.ai.cloudflare.com/v1/9bd70e8eb28637e723c8984b8c85c81e
|
tool_server/.env
ADDED
@@ -0,0 +1,157 @@
| 1 |
+
TOOLSERVER_HOST=127.0.0.1
|
| 2 |
+
TOOLSERVER_PORT=18080
|
| 3 |
+
LOG_DIR=/data/adaptai/tool_server/logs
|
| 4 |
+
SECRETS_DIR=/data/adaptai/secrets
|
| 5 |
+
# comma-separated origins for Open WebUI host(s); leave * for local until TLS/proxy is added
|
| 6 |
+
CORS_ORIGINS=*
|
| 7 |
+
APP_ENV=local
|
| 8 |
+
#####################################################
|
| 9 |
+
# GENERAL
|
| 10 |
+
#####################################################
|
| 11 |
+
|
| 12 |
+
NO_LIMITS=true
|
| 13 |
+
ALLOW_ALL_FILE_FORMATS=true
|
| 14 |
+
TLS_ENABLED=true
|
| 15 |
+
CLOUDFLARE_TLS=true
|
| 16 |
+
|
| 17 |
+
#####################################################
|
| 18 |
+
# BUILDKITE
|
| 19 |
+
#####################################################
|
| 20 |
+
|
| 21 |
+
BUILDKITE_API_KEY=bkua_73312c607fb6bb83b57b4d5c9d32d08a0cb1b926
|
| 22 |
+
|
| 23 |
+
#####################################################
|
| 24 |
+
# CODEDEV
|
| 25 |
+
#####################################################
|
| 26 |
+
|
| 27 |
+
CODEDEV_API_KEY=bda8cc36-62a3-4762-b2e7-ff8d368dfcff
|
| 28 |
+
|
| 29 |
+
#####################################################
|
| 30 |
+
# NEXTAUTH
|
| 31 |
+
#####################################################
|
| 32 |
+
|
| 33 |
+
NEXT_AUTH_KEY="XN6nntyT5H/g4SEfAjp9GUJSmRVrQq8dbTlbEutddi8="
|
| 34 |
+
|
| 35 |
+
#####################################################
|
| 36 |
+
# NGROK
|
| 37 |
+
#####################################################
|
| 38 |
+
|
| 39 |
+
NGROK_AUTHTOKEN=2i2lfH7junmSyRjK5ZCI7glCtsm_4WFtitqGnrqpDWyX6NdBP
|
| 40 |
+
NGROK_DOMAIN=zebra-tidy-partially.ngrok-free.app
|
| 41 |
+
NGROK_DOMAIN_ID=rd_2i2nLDAN4xCJ5500E7rHW733Jty
|
| 42 |
+
|
| 43 |
+
#####################################################
|
| 44 |
+
# VERCEL
|
| 45 |
+
#####################################################
|
| 46 |
+
|
| 47 |
+
VERCEL_FULL_ADMIN_API_TOKEN=q4l6wTdnqa3naOpHuiTfbkRg
|
| 48 |
+
|
| 49 |
+
#####################################################
|
| 50 |
+
# AGENTOPS
|
| 51 |
+
#####################################################
|
| 52 |
+
|
| 53 |
+
AgentOPS_API_KEY=089f5725-b770-4e8c-a23d-c9c2788d039a
|
| 54 |
+
|
| 55 |
+
#####################################################
|
| 56 |
+
# HASHICORP CLOUD
|
| 57 |
+
#####################################################
|
| 58 |
+
|
| 59 |
+
HASHICORP_CLOUD_ORGANIZATIONAL_ID=dc19ea1c-e1db-4c1d-b78e-a413b8b966b0
|
| 60 |
+
HASHICORP_CLOUD_EMAIL=tonkateltec@gmail.com
|
| 61 |
+
HASHICORP_CLOUD_AUTH=GITHUB_LIQUIDMOVZ
|
| 62 |
+
HASHICORP_CLOUD_USER_ID=SBf7E7VSA1BOajERcCNNsTsCGtW5IeYS
|
| 63 |
+
HASHICORP_CLOUD_ADAPTAI_PROJECT=adaptai
|
| 64 |
+
HASHICORP_CLOUD_ADAPTAI_PROJECT_ID=5b99644f-f7a6-4b36-904e-6788b7a7a763
|
| 65 |
+
HASHICORP_CLOUD_ASERVICE_PRINCIPLE=adaptai-372264@dc19ea1c-e1db-4c1d-b78e-a413b8b966b0
|
| 66 |
+
HASHICORP_CLOUD_ARESOURCE_NAME=iam/organization/dc19ea1c-e1db-4c1d-b78e-a413b8b966b0/service-principal/adaptai
|
| 67 |
+
HASHICORP_CLOUD_API_KEY=B2AzDqtzjmdAle_4WvJNdPEsfvlwUQzW4pK7srO8JiZtxt6CXyp4oUP9fDJwJhdk
|
| 68 |
+
|
| 69 |
+
#####################################################
|
| 70 |
+
# HUGGINGFACE
|
| 71 |
+
#####################################################
|
| 72 |
+
|
| 73 |
+
HF_DATASET_REPO=LevelUp2x/adaptai
|
| 74 |
+
|
| 75 |
+
#####################################################
|
| 76 |
+
# WEB SEARCH / INTELLIGENCE APIs
|
| 77 |
+
#####################################################
|
| 78 |
+
|
| 79 |
+
PERPLEXITY_API_KEY=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
|
| 80 |
+
FIRECRAWL_API_KEY=fc-94ebc24ae6a4492f9022d6a83001fc54
|
| 81 |
+
SERPER_API_KEY=aace3627d2b7d008f85ce06100984ae7c2a2066f
|
| 82 |
+
SERPER_URL="https://google.serper.dev/search"
|
| 83 |
+
TAVILY_API_KEY=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
|
| 84 |
+
|
| 85 |
+
#####################################################
|
| 86 |
+
# ALGOLIA
|
| 87 |
+
#####################################################
|
| 88 |
+
|
| 89 |
+
ALGOLIA_APPLICATION_ID=H23FYV09PA
|
| 90 |
+
ALGOLIA_SEARCH_API_KEY=7d71ebe7bf1f76a6bb14aecfae798372
|
| 91 |
+
ALGOLIA_WRITE_API_KEY=4d8787832cc1a7920f68df7e791e75bb
|
| 92 |
+
|
| 93 |
+
#####################################################
|
| 94 |
+
# GITHUB ENTERPRISE (ADAPTNOVA)
|
| 95 |
+
#####################################################
|
| 96 |
+
|
| 97 |
+
ADAPTNOVA_ADMIN_GITHUB_PAT=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
|
| 98 |
+
|
| 99 |
+
#####################################################
|
| 100 |
+
# SLACK
|
| 101 |
+
#####################################################
|
| 102 |
+
|
| 103 |
+
SLACK_BOT_TOKEN=xoxb-7512897604960-7490252049218-3X0ipilceQs1ABTmJbVieQ7e
|
| 104 |
+
SLACK_CHASE_USER_OAUTH_TOKEN=xoxe.xoxp-1-Mi0yLTc1MTI4OTc2MDQ5NjAtNzQ4NzIwNzY5MzM2NS03NjExMzYwMTMzNjcwLTc2MDM0MzUxNzA0ODctMmNiZGEyN2RhZjAwNTUxNmE2ZWI5ZTE4M2M0ODEwM2I1ODE3NDAxMTUwOWQyNGQ4ZDIyNTZiZjk0M2M5ZDk1Yg
|
| 105 |
+
SLACK_CHASE_USER_OAUTH_REFRESH_TOKEN=xoxe-1-My0xLTc1MTI4OTc2MDQ5NjAtNzYxMTM2MDEzMzY3MC03NjE1MTA5MzQyNDUzLWQ5MDA4OTQ3YzRiODRkNWU1ZjQ3NmY2M2NiZTBjN2EzN2I5MGY4Y2JjNjhkMzJlYmFkMjFkMzNmMWRkZThkMzc
|
| 106 |
+
SLACK_WEBHOOK_TEAMADAPT=https://hooks.slack.com/services/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
|
| 107 |
+
SLACK_CHANNEL_TEAMADAPT=teamadapt
|
| 108 |
+
SLACK_WEBHOOK_GITHUB=https://hooks.slack.com/services/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
|
| 109 |
+
SLACK_CHANNEL_GITHUB=github
|
| 110 |
+
|
| 111 |
+
#####################################################
|
| 112 |
+
# DOCKER HUB
|
| 113 |
+
#####################################################
|
| 114 |
+
|
| 115 |
+
DOCKER_HUB_PAT=dckr_pat_r9rW9wyc6KQpo3C4h0Ha9x7X3Tw
|
| 116 |
+
|
| 117 |
+
#####################################################
|
| 118 |
+
# ATLASSIAN (JIRA + CONFLUENCE)
|
| 119 |
+
#####################################################
|
| 120 |
+
|
| 121 |
+
ATLASSIAN_EMAIL=chase@levelup2x.com
|
| 122 |
+
ATLASSIAN_URL=https://levelup2x.atlassian.net
|
| 123 |
+
JIRA_BASE_URL=https://levelup2x.atlassian.net/jira
|
| 124 |
+
JIRA_ADMIN_API_KEY=ATATT3xFfGF0agPtcic5EaQvq0PUYTGGTiwalOu9WekYIFw9pSHHN9uN1AJRP-P8XyrIuAdZvddgsrp61XKC4sG3pQgB0brq_O3RpMmMQyPryHYlNVHlGJLhVk-HWW_79bPOYhY3Np86wF9S5DEQUpzfI_U7un7nWO_QHSiR9KhAR6ZNZTu80Y0=2B4B45A3
|
| 125 |
+
JIRA_ADAPT_MASTER_PROJECT_KEY=ADAPT
|
| 126 |
+
JIRA_SERVICE_MANAGEMENT_BASE_URL=https://levelup2x.atlassian.net/jira/servicedesk
|
| 127 |
+
JIRA_SERVICE_MANAGEMENT_ADMIN_API_KEY=${JIRA_ADMIN_API_KEY}
|
| 128 |
+
|
| 129 |
+
CONFLUENCE_BASE_URL=https://levelup2x.atlassian.net/wiki
|
| 130 |
+
CONFLUENCE_ADMIN_API_KEY=${JIRA_ADMIN_API_KEY}
|
| 131 |
+
CONFLUENCE_ADAPT_MASTER_MAIN_PAGE=https://levelup2x.atlassian.net/wiki/x/XYDZ
|
| 132 |
+
|
| 133 |
+
ATLASSIAN_FULL_ACCESS_API_KEY=ATCTT3xFfGN0ilx-yeocz0Jx0zqaU8yq9d-3PkmQSAEOfcUuUSgMqC1orWt3CuGduTeO4MS4rkomFi3eEq5-EvI3H4F0JXh09_AhFgk7AACOP1XvJv5HUktayLhyLcDT3jawJyhTO82LrD_FPQ8MACDQYlz9C_9Wo4mJX_JfjiVEo8QxZNCUY-Y=A55AE58C
|
| 134 |
+
ATLASSIAN_FULL_ACCESS_TOKEN=${ATLASSIAN_FULL_ACCESS_API_KEY}
|
| 135 |
+
ATLASSIAN_ADMIN_TOKEN=${JIRA_ADMIN_API_KEY}
|
| 136 |
+
AtlasOps_API_Token=${JIRA_ADMIN_API_KEY}
|
| 137 |
+
|
| 138 |
+
#####################################################
|
| 139 |
+
# CLOUDFLARE & R2 STORAGE
|
| 140 |
+
#####################################################
|
| 141 |
+
|
| 142 |
+
CLOUDFLARE_ZONE_ID=7981a8217e9e9fc828a6ed793d81ad6c
|
| 143 |
+
CLOUDFLARE_ZONE_ID_ADAPTDEV=18017056a1f7256cff2e02199ed060e1
|
| 144 |
+
CLOUDFLARE_ACCOUNT_ID=9bd70e8eb28637e723c8984b8c85c81e
|
| 145 |
+
|
| 146 |
+
CLOUDFLARE_API_KEY=a37d2db4459a2123f98ab635a2ac9a85c0380
|
| 147 |
+
CLOUDFLARE_API_TOKEN=RVq2PrGv4MfTauNa-cz6jmHaMdjcmH4Z10JThlud
|
| 148 |
+
CLOUDFLARE_GLOBAL_API_KEY=${CLOUDFLARE_API_KEY}
|
| 149 |
+
CLOUDFLARE_ORIGIN_CA_KEY=v1.0-1d99fdecccc8b700e7bc44b4-0ba5f156f123c87a36e036b63cc1709194bb2c70a8cb5e0a98d13402f805a947227065152d4a6c7fd22ae40f0773fe617f8f6fa9ea06d5802c69b7cac4a1c0afb38f4d02129fd39c97
|
| 150 |
+
CLOUDFLARE_ADMIN_API_TOKEN=cH-8tuZdztKZyYvc2JlJRk78_TDksULXJ2WesbcC
|
| 151 |
+
CLOUDFLARE_WORKERS_R2_API_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY
|
| 152 |
+
WORKERS_AI_API_TOKEN=uEhieo_hNeJ-yR3L8LZK2qKg5kjSkAqKOnAl5rob
|
| 153 |
+
|
| 154 |
+
# R2 S3 API
|
| 155 |
+
R2_ACCESS_KEY_ID=e5c4452f8acdd362720e38d8b75707cd
|
| 156 |
+
R2_SECRET_ACCESS_KEY=b67d1ba6b2bfad98837a912eb012061b023c73524c1d29afde8a10d16a3f7554
|
| 157 |
+
R2_ENDPOINT=https://9bd70e8eb28637e723c8984b8c85c81e.r2.cloudflarestorage.com
|