#!/usr/bin/env python3
"""
DTO Confluence Client - Automated post-run report generation and publishing
Creates detailed analysis reports for completed data transfers
"""
import os
import json
from typing import Dict, Any, Optional, List
from datetime import datetime, timezone
import requests
from requests.auth import HTTPBasicAuth
import base64
from pathlib import Path
class DTOConfluenceClient:
    """Publishes automated post-run DTO transfer reports to Confluence.

    Connection settings fall back to the CONFLUENCE_SERVER_URL,
    CONFLUENCE_USERNAME and CONFLUENCE_API_TOKEN environment variables when
    not supplied explicitly. Without credentials every network operation is
    a no-op that returns False/None instead of raising.
    """

    # Default timeout (seconds) applied to Confluence HTTP calls so a hung
    # server cannot stall report publishing indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, server_url: Optional[str] = None, username: Optional[str] = None,
                 api_token: Optional[str] = None, space_key: str = "DTO") -> None:
        self.server_url = server_url or os.getenv('CONFLUENCE_SERVER_URL')
        self.username = username or os.getenv('CONFLUENCE_USERNAME')
        self.api_token = api_token or os.getenv('CONFLUENCE_API_TOKEN')
        self.space_key = space_key
        # Basic auth (email + API token) is only built when both parts are known;
        # a None auth is the "not configured" sentinel checked by every method.
        self.auth = HTTPBasicAuth(self.username, self.api_token) if self.username and self.api_token else None
        self.headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }

    def test_connection(self) -> bool:
        """Test Confluence API connectivity.

        Returns True when the current-user endpoint answers 200, False on any
        configuration, HTTP, or network problem (errors are printed, not raised).
        """
        if not self.auth or not self.server_url:
            print("❌ Confluence credentials not configured")
            return False
        try:
            response = requests.get(
                f"{self.server_url}/rest/api/user/current",
                auth=self.auth,
                headers=self.headers,
                timeout=10
            )
            if response.status_code == 200:
                user_info = response.json()
                print(f"✅ Connected to Confluence as {user_info.get('displayName')}")
                return True
            print(f"❌ Confluence connection failed: {response.status_code}")
            return False
        except Exception as e:
            print(f"❌ Error connecting to Confluence: {e}")
            return False

    def get_or_create_parent_page(self) -> Optional[str]:
        """Return the page id of the 'DTO Transfer Reports' parent page.

        Looks the page up by title in the configured space and creates it when
        absent. Returns None when unconfigured or on any failure.
        """
        if not self.auth:
            return None
        try:
            # Check if the DTO Reports page already exists.
            search_response = requests.get(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                params={
                    'spaceKey': self.space_key,
                    'title': 'DTO Transfer Reports',
                    'type': 'page'
                },
                timeout=self.REQUEST_TIMEOUT
            )
            if search_response.status_code == 200:
                results = search_response.json()['results']
                if results:
                    page_id = results[0]['id']
                    print(f"✅ Found existing DTO Reports page: {page_id}")
                    return page_id
            # Create the parent page if it doesn't exist.
            # NOTE(review): the body below looks like storage-format markup that
            # lost its tags (the trailing "creation / true / 20" read like
            # stripped macro parameters) — confirm against the original template.
            parent_content = {
                "type": "page",
                "title": "DTO Transfer Reports",
                "space": {"key": self.space_key},
                "body": {
                    "storage": {
                        "value": """
Data Transfer Operations - Automated Reports
This page contains automated post-run analysis reports for all DTO transfers.
Reports are automatically generated after each transfer completion and include:
- Performance metrics and throughput analysis
- Data integrity validation results
- Infrastructure utilization statistics
- Error analysis and recommendations
- Cost analysis and optimization suggestions
Latest Reports:
creation
true
20
""",
                        "representation": "storage"
                    }
                }
            }
            create_response = requests.post(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                json=parent_content,
                timeout=self.REQUEST_TIMEOUT
            )
            # Confluence answers 200 on content creation; 201 is accepted for
            # deployments that follow strict REST "Created" semantics.
            if create_response.status_code in (200, 201):
                page_id = create_response.json()['id']
                print(f"✅ Created DTO Reports parent page: {page_id}")
                return page_id
            print(f"❌ Failed to create parent page: {create_response.status_code}")
            return None
        except Exception as e:
            print(f"❌ Error managing parent page: {e}")
            return None

    def generate_run_report_content(self, run_data: Dict[str, Any],
                                    metrics: Dict[str, Any]) -> str:
        """Generate comprehensive run report content.

        Pure string builder: reads run_data (run_id, final_status, data_class,
        environment) and metrics (throughput, duration, size, validation,
        errors, artifacts) and returns the storage-format report body.
        """
        run_id = run_data.get('run_id', 'unknown')
        status = run_data.get('final_status', 'unknown')
        data_class = run_data.get('data_class', 'unknown')
        environment = run_data.get('environment', 'unknown')
        # Performance metrics
        throughput = metrics.get('average_throughput_mbps', 0)
        duration_seconds = metrics.get('total_duration_seconds', 0)
        data_size_gb = metrics.get('data_size_gb', 0)
        # Convert duration to human readable; int() keeps float inputs from
        # rendering as e.g. "1.0h 30.0m".
        hours, remainder = divmod(int(duration_seconds), 3600)
        minutes, seconds = divmod(remainder, 60)
        duration_str = f"{hours}h {minutes}m {seconds}s"
        # Status color (Atlassian palette); grey for unrecognized statuses.
        status_color = {
            'SUCCESS': '#00875A',
            'FAILED': '#DE350B',
            'PARTIAL': '#FF8B00'
        }.get(status, '#6B778C')
        content = f"""
DTO Transfer Report: {run_id}
| Run ID |
{run_id} |
Data Class |
Blue
{data_class}
|
| Status |
{status_color}
{status}
|
Environment |
{environment.upper()} |
| Data Size |
{data_size_gb:.2f} GB |
Duration |
{duration_str} |
| Average Throughput |
{throughput:.1f} MB/s |
Transfer Method |
{metrics.get('transfer_method', 'unknown')} |
📊 Performance Analysis
Throughput Performance
The transfer achieved an average throughput of {throughput:.1f} MB/s, transferring {data_size_gb:.2f} GB in {duration_str}.
| Metric |
Value |
Baseline |
Performance |
| Throughput |
{throughput:.1f} MB/s |
500 MB/s |
{'✅ Above baseline' if throughput >= 500 else '⚠️ Below baseline'} |
| Efficiency |
{(throughput/10000*100):.1f}% |
5% |
{'✅ Efficient' if throughput/10000 >= 0.05 else '⚠️ Inefficient'} |
Infrastructure Utilization
- Source Host: {metrics.get('source_host', 'unknown')}
- Target Host: {metrics.get('target_host', 'unknown')}
- Network Path: {metrics.get('network_path', 'Direct connection')}
- Compression: {metrics.get('compression_enabled', 'No')}
🔍 Data Integrity Validation
"""
        # Add validation results if available.
        validation_results = metrics.get('validation_results', {})
        if validation_results:
            files_validated = validation_results.get('files_validated', 0)
            files_passed = validation_results.get('files_passed', 0)
            content += f"""
| Files Validated |
{files_validated} |
| Files Passed |
{files_passed} |
| Validation Rate |
{(files_passed/files_validated*100) if files_validated > 0 else 0:.1f}% |
| Checksum Algorithm |
{validation_results.get('checksum_algorithm', 'SHA-256')} |
"""
        # Add error analysis if any issues were reported.
        errors = metrics.get('errors', [])
        if errors:
            content += """
⚠️ Issues and Errors
Transfer Issues Detected
"""
            for error in errors[:5]:  # Limit to 5 errors
                content += f" - {error}\n"
            content += "\n"
        # Add recommendations driven by the measured performance.
        content += """
🎯 Recommendations
Performance Optimization
"""
        if throughput < 500:
            content += " - Consider increasing parallel transfer streams\n"
            content += " - Optimize network configuration for higher throughput\n"
        if duration_seconds > 7200:  # 2 hours
            content += " - For large transfers, consider using compression\n"
            content += " - Schedule transfers during off-peak hours\n"
        content += """
Cost Optimization
- Monitor bandwidth usage for cost optimization
- Consider data deduplication for recurring transfers
📋 Artifacts and Logs
The following artifacts were generated during this transfer:
"""
        for artifact in metrics.get('artifacts', []):
            # List each artifact by basename with its full path.
            filename = Path(artifact).name
            content += f" - {filename} ({artifact})\n"
        content += f"""
📈 Historical Context
This report was automatically generated on {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')} by the DTO automated reporting system.
Next Steps
- Review performance metrics against SLOs
- Analyze any validation failures
- Implement recommended optimizations
- Update data lineage documentation
"""
        return content

    def create_run_report(self, run_data: Dict[str, Any],
                          metrics: Dict[str, Any]) -> Optional[str]:
        """Create a Confluence page for the run report.

        Returns the page's web URL on success, None on failure or when the
        client is not configured.
        """
        if not self.auth:
            print("❌ Confluence not configured")
            return None
        run_id = run_data.get('run_id', 'unknown')
        data_class = run_data.get('data_class', 'unknown')
        environment = run_data.get('environment', 'unknown')
        # Reports are nested under the shared parent page.
        parent_page_id = self.get_or_create_parent_page()
        if not parent_page_id:
            print("❌ Could not get parent page")
            return None
        try:
            report_content = self.generate_run_report_content(run_data, metrics)
            page_data = {
                "type": "page",
                "title": f"Transfer Report: {run_id} ({data_class} - {environment})",
                "space": {"key": self.space_key},
                "ancestors": [{"id": parent_page_id}],
                "body": {
                    "storage": {
                        "value": report_content,
                        "representation": "storage"
                    }
                }
            }
            response = requests.post(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                json=page_data,
                timeout=self.REQUEST_TIMEOUT
            )
            if response.status_code in (200, 201):
                page_info = response.json()
                links = page_info.get('_links', {})
                # Prefer the base URL Confluence reports for itself; blindly
                # appending "/wiki" double-appends it when server_url already
                # ends in "/wiki" (the documented configuration format).
                base_url = links.get('base', self.server_url)
                page_url = f"{base_url}{links.get('webui', '')}"
                print(f"✅ Created Confluence report: {page_url}")
                return page_url
            print(f"❌ Failed to create report: {response.status_code} - {response.text}")
            return None
        except Exception as e:
            print(f"❌ Error creating report: {e}")
            return None

    def attach_file_to_page(self, page_id: str, file_path: str,
                            comment: str = "DTO transfer artifact") -> bool:
        """Attach a local file to a Confluence page.

        Returns True on success; False when unconfigured, the file is missing,
        or the upload fails.
        """
        if not self.auth or not Path(file_path).exists():
            return False
        try:
            with open(file_path, 'rb') as f:
                files = {
                    'file': (Path(file_path).name, f, 'application/octet-stream'),
                    'comment': (None, comment)
                }
                # Confluence rejects attachment POSTs without the XSRF bypass
                # header. The JSON default headers are deliberately NOT sent so
                # requests can set the multipart Content-Type itself.
                response = requests.post(
                    f"{self.server_url}/rest/api/content/{page_id}/child/attachment",
                    auth=self.auth,
                    headers={'X-Atlassian-Token': 'nocheck'},
                    files=files,
                    timeout=self.REQUEST_TIMEOUT
                )
            if response.status_code in (200, 201):
                print(f"✅ Attached file to page: {Path(file_path).name}")
                return True
            print(f"❌ Failed to attach file: {response.status_code}")
            return False
        except Exception as e:
            print(f"❌ Error attaching file: {e}")
            return False

    def update_run_report(self, page_id: str, updated_metrics: Dict[str, Any]) -> bool:
        """Update an existing run report by appending a new metrics section.

        Fetches the current body and version, appends the updated metrics, and
        publishes the result as version N+1. Returns True on success.
        """
        if not self.auth:
            return False
        try:
            # Get current page content and version (version is required by the
            # optimistic-locking update protocol).
            response = requests.get(
                f"{self.server_url}/rest/api/content/{page_id}",
                auth=self.auth,
                headers=self.headers,
                params={'expand': 'body.storage,version'},
                timeout=self.REQUEST_TIMEOUT
            )
            if response.status_code != 200:
                print(f"❌ Failed to get page: {response.status_code}")
                return False
            page_data = response.json()
            current_version = page_data['version']['number']
            # Update content (simplified - append metrics to the existing body).
            current_content = page_data['body']['storage']['value']
            metrics_update = f"""
📊 Updated Metrics
Updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}
"""
            for key, value in updated_metrics.items():
                metrics_update += f" - {key}: {value}\n"
            metrics_update += "\n"
            updated_content = current_content + metrics_update
            update_data = {
                "version": {"number": current_version + 1},
                "title": page_data['title'],
                "type": "page",
                "body": {
                    "storage": {
                        "value": updated_content,
                        "representation": "storage"
                    }
                }
            }
            update_response = requests.put(
                f"{self.server_url}/rest/api/content/{page_id}",
                auth=self.auth,
                headers=self.headers,
                json=update_data,
                timeout=self.REQUEST_TIMEOUT
            )
            if update_response.status_code == 200:
                print(f"✅ Updated Confluence report: {page_id}")
                return True
            print(f"❌ Failed to update report: {update_response.status_code}")
            return False
        except Exception as e:
            print(f"❌ Error updating report: {e}")
            return False
# Test function
def test_confluence_integration():
    """Smoke-test the Confluence integration by publishing a mock report.

    Returns True when a report page was created, False when credentials are
    missing or publishing fails.
    """
    client = DTOConfluenceClient()
    # Without working credentials there is nothing further to exercise.
    if not client.test_connection():
        print("❌ Confluence integration test failed (expected without proper credentials)")
        return False
    # Representative run + metrics payloads for the report generator.
    sample_run = {
        'run_id': 'test-confluence-001',
        'job_id': 'test-job-001',
        'data_class': 'CLASS_A',
        'environment': 'production',
        'final_status': 'SUCCESS',
        'manifest_path': '/manifests/class_a/test.yaml',
    }
    sample_metrics = {
        'average_throughput_mbps': 734.2,
        'total_duration_seconds': 5400,  # 1.5 hours
        'data_size_gb': 150.0,
        'transfer_method': 'ssh+dd',
        'source_host': 'vast2.adapt.ai',
        'target_host': 'vast1.adapt.ai',
        'validation_results': {
            'files_validated': 145,
            'files_passed': 145,
            'checksum_algorithm': 'SHA-256',
        },
        'artifacts': [
            '/logs/test-confluence-001.log',
            '/reports/test-confluence-001.pdf',
            '/checksums/test-confluence-001.sha256',
        ],
    }
    report_url = client.create_run_report(sample_run, sample_metrics)
    if not report_url:
        print("❌ Failed to create test report")
        return False
    print(f"✅ Confluence integration test completed: {report_url}")
    return True
if __name__ == "__main__":
    print("Testing DTO Confluence Integration...")
    print("=" * 50)
    test_confluence_integration()
    # Usage hints for configuring the integration via the environment.
    for hint in (
        "\nTo use Confluence integration, set these environment variables:",
        "export CONFLUENCE_SERVER_URL=https://your-domain.atlassian.net/wiki",
        "export CONFLUENCE_USERNAME=your-email@company.com",
        "export CONFLUENCE_API_TOKEN=your-api-token",
    ):
        print(hint)