|
|
|
|
|
""" |
|
|
DTO Confluence Client - Automated post-run report generation and publishing |
|
|
Creates detailed analysis reports for completed data transfers |
|
|
""" |
|
|
|
|
|
import os |
|
|
import json |
|
|
from typing import Dict, Any, Optional, List |
|
|
from datetime import datetime, timezone |
|
|
import requests |
|
|
from requests.auth import HTTPBasicAuth |
|
|
import base64 |
|
|
from pathlib import Path |
|
|
|
|
|
class DTOConfluenceClient:
    """Publishes automated post-run DTO transfer reports to Confluence.

    Wraps the Confluence REST API to create, update, and attach artifacts to
    transfer-report pages. Credentials and the server location may be supplied
    directly or via the CONFLUENCE_SERVER_URL / CONFLUENCE_USERNAME /
    CONFLUENCE_API_TOKEN environment variables. Every public method degrades
    gracefully (prints a message and returns a falsy value) when credentials
    are missing or the server is unreachable.
    """

    # Timeout (seconds) applied to every Confluence REST call so a slow or
    # unreachable server cannot hang the reporting pipeline indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, server_url: Optional[str] = None, username: Optional[str] = None,
                 api_token: Optional[str] = None, space_key: str = "DTO"):
        """Initialize the client.

        Args:
            server_url: Confluence base URL; falls back to CONFLUENCE_SERVER_URL.
            username: API username/email; falls back to CONFLUENCE_USERNAME.
            api_token: API token; falls back to CONFLUENCE_API_TOKEN.
            space_key: Confluence space that hosts the report pages.
        """
        self.server_url = server_url or os.getenv('CONFLUENCE_SERVER_URL')
        self.username = username or os.getenv('CONFLUENCE_USERNAME')
        self.api_token = api_token or os.getenv('CONFLUENCE_API_TOKEN')
        self.space_key = space_key
        # Auth is only built when both username and token are present; every
        # network-facing method checks self.auth before calling the server.
        self.auth = HTTPBasicAuth(self.username, self.api_token) if self.username and self.api_token else None
        self.headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }

    def test_connection(self) -> bool:
        """Test Confluence API connectivity.

        Returns:
            True when the configured credentials can fetch the current user;
            False on missing configuration, HTTP error, or network failure.
        """
        if not self.auth or not self.server_url:
            print("❌ Confluence credentials not configured")
            return False

        try:
            response = requests.get(
                f"{self.server_url}/rest/api/user/current",
                auth=self.auth,
                headers=self.headers,
                timeout=10
            )

            if response.status_code == 200:
                user_info = response.json()
                print(f"✅ Connected to Confluence as {user_info.get('displayName')}")
                return True
            print(f"❌ Confluence connection failed: {response.status_code}")
            return False

        except Exception as e:
            # Best-effort connectivity probe: report the error and carry on.
            print(f"❌ Error connecting to Confluence: {e}")
            return False

    def get_or_create_parent_page(self) -> Optional[str]:
        """Get or create the "DTO Transfer Reports" parent page.

        Returns:
            The parent page id as a string, or None when the client is not
            configured or the API calls fail.
        """
        if not self.auth:
            return None

        try:
            # Look for an existing parent page first so repeated runs do not
            # create duplicates.
            search_response = requests.get(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                params={
                    'spaceKey': self.space_key,
                    'title': 'DTO Transfer Reports',
                    'type': 'page'
                },
                timeout=self.REQUEST_TIMEOUT
            )

            if search_response.status_code == 200:
                results = search_response.json()['results']
                if results:
                    page_id = results[0]['id']
                    print(f"✅ Found existing DTO Reports page: {page_id}")
                    return page_id

            # No parent page yet — create one whose body embeds a "children"
            # macro listing the 20 most recent reports.
            parent_content = {
                "type": "page",
                "title": "DTO Transfer Reports",
                "space": {"key": self.space_key},
                "body": {
                    "storage": {
                        "value": """
<h2>Data Transfer Operations - Automated Reports</h2>
<p>This page contains automated post-run analysis reports for all DTO transfers.</p>
<p>Reports are automatically generated after each transfer completion and include:</p>
<ul>
    <li>Performance metrics and throughput analysis</li>
    <li>Data integrity validation results</li>
    <li>Infrastructure utilization statistics</li>
    <li>Error analysis and recommendations</li>
    <li>Cost analysis and optimization suggestions</li>
</ul>
<p><strong>Latest Reports:</strong></p>
<ac:structured-macro ac:name="children">
    <ac:parameter ac:name="sort">creation</ac:parameter>
    <ac:parameter ac:name="reverse">true</ac:parameter>
    <ac:parameter ac:name="max">20</ac:parameter>
</ac:structured-macro>
""",
                        "representation": "storage"
                    }
                }
            }

            create_response = requests.post(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                json=parent_content,
                timeout=self.REQUEST_TIMEOUT
            )

            if create_response.status_code == 200:
                page_id = create_response.json()['id']
                print(f"✅ Created DTO Reports parent page: {page_id}")
                return page_id
            print(f"❌ Failed to create parent page: {create_response.status_code}")
            return None

        except Exception as e:
            print(f"❌ Error managing parent page: {e}")
            return None

    def generate_run_report_content(self, run_data: Dict[str, Any],
                                    metrics: Dict[str, Any]) -> str:
        """Generate Confluence storage-format markup for a run report.

        Args:
            run_data: Run metadata (run_id, final_status, data_class, environment).
            metrics: Collected metrics; recognized keys include
                average_throughput_mbps, total_duration_seconds, data_size_gb,
                transfer_method, source_host, target_host, network_path,
                compression_enabled, validation_results, errors, artifacts.

        Returns:
            Confluence storage-format HTML for the report page body.
        """
        run_id = run_data.get('run_id', 'unknown')
        status = run_data.get('final_status', 'unknown')
        data_class = run_data.get('data_class', 'unknown')
        environment = run_data.get('environment', 'unknown')

        throughput = metrics.get('average_throughput_mbps', 0)
        duration_seconds = metrics.get('total_duration_seconds', 0)
        data_size_gb = metrics.get('data_size_gb', 0)

        # Render the duration as whole h/m/s even when a float is supplied.
        total_seconds = int(duration_seconds)
        hours = total_seconds // 3600
        minutes = (total_seconds % 3600) // 60
        seconds = total_seconds % 60
        duration_str = f"{hours}h {minutes}m {seconds}s"

        # Hex color for the status macro; grey for unrecognized statuses.
        status_color = {
            'SUCCESS': '#00875A',
            'FAILED': '#DE350B',
            'PARTIAL': '#FF8B00'
        }.get(status, '#6B778C')

        content = f"""
<h1>DTO Transfer Report: {run_id}</h1>

<table class="wrapped">
<tr>
    <th>Run ID</th>
    <td><strong>{run_id}</strong></td>
    <th>Data Class</th>
    <td><ac:structured-macro ac:name="status">
        <ac:parameter ac:name="colour">Blue</ac:parameter>
        <ac:parameter ac:name="title">{data_class}</ac:parameter>
    </ac:structured-macro></td>
</tr>
<tr>
    <th>Status</th>
    <td><ac:structured-macro ac:name="status">
        <ac:parameter ac:name="colour">{status_color}</ac:parameter>
        <ac:parameter ac:name="title">{status}</ac:parameter>
    </ac:structured-macro></td>
    <th>Environment</th>
    <td>{environment.upper()}</td>
</tr>
<tr>
    <th>Data Size</th>
    <td>{data_size_gb:.2f} GB</td>
    <th>Duration</th>
    <td>{duration_str}</td>
</tr>
<tr>
    <th>Average Throughput</th>
    <td><strong>{throughput:.1f} MB/s</strong></td>
    <th>Transfer Method</th>
    <td>{metrics.get('transfer_method', 'unknown')}</td>
</tr>
</table>

<h2>📊 Performance Analysis</h2>
<h3>Throughput Performance</h3>
<p>The transfer achieved an average throughput of <strong>{throughput:.1f} MB/s</strong>, transferring {data_size_gb:.2f} GB in {duration_str}.</p>

<table class="wrapped">
<tr>
    <th>Metric</th>
    <th>Value</th>
    <th>Baseline</th>
    <th>Performance</th>
</tr>
<tr>
    <td>Throughput</td>
    <td>{throughput:.1f} MB/s</td>
    <td>500 MB/s</td>
    <td>{'✅ Above baseline' if throughput >= 500 else '⚠️ Below baseline'}</td>
</tr>
<tr>
    <td>Efficiency</td>
    <td>{(throughput/10000*100):.1f}%</td>
    <td>5%</td>
    <td>{'✅ Efficient' if throughput/10000 >= 0.05 else '⚠️ Inefficient'}</td>
</tr>
</table>

<h3>Infrastructure Utilization</h3>
<ul>
    <li><strong>Source Host:</strong> {metrics.get('source_host', 'unknown')}</li>
    <li><strong>Target Host:</strong> {metrics.get('target_host', 'unknown')}</li>
    <li><strong>Network Path:</strong> {metrics.get('network_path', 'Direct connection')}</li>
    <li><strong>Compression:</strong> {metrics.get('compression_enabled', 'No')}</li>
</ul>

<h2>🔒 Data Integrity Validation</h2>
"""

        # Validation section is only emitted when validation actually ran.
        validation_results = metrics.get('validation_results', {})
        if validation_results:
            files_validated = validation_results.get('files_validated', 0)
            files_passed = validation_results.get('files_passed', 0)

            content += f"""
<table class="wrapped">
<tr>
    <th>Files Validated</th>
    <td>{files_validated}</td>
</tr>
<tr>
    <th>Files Passed</th>
    <td>{files_passed}</td>
</tr>
<tr>
    <th>Validation Rate</th>
    <td>{(files_passed/files_validated*100) if files_validated > 0 else 0:.1f}%</td>
</tr>
<tr>
    <th>Checksum Algorithm</th>
    <td>{validation_results.get('checksum_algorithm', 'SHA-256')}</td>
</tr>
</table>
"""

        # Show at most the first 5 errors to keep the page readable.
        errors = metrics.get('errors', [])
        if errors:
            content += """
<h2>⚠️ Issues and Errors</h2>
<ac:structured-macro ac:name="warning">
<ac:parameter ac:name="title">Transfer Issues Detected</ac:parameter>
<ac:rich-text-body>
<ul>
"""
            for error in errors[:5]:
                content += f"    <li>{error}</li>\n"
            content += """</ul>
</ac:rich-text-body>
</ac:structured-macro>
"""

        content += """
<h2>🎯 Recommendations</h2>
<h3>Performance Optimization</h3>
<ul>
"""

        if throughput < 500:
            content += "    <li>Consider increasing parallel transfer streams</li>\n"
            content += "    <li>Optimize network configuration for higher throughput</li>\n"

        if duration_seconds > 7200:
            content += "    <li>For large transfers, consider using compression</li>\n"
            content += "    <li>Schedule transfers during off-peak hours</li>\n"

        content += """</ul>

<h3>Cost Optimization</h3>
<ul>
    <li>Monitor bandwidth usage for cost optimization</li>
    <li>Consider data deduplication for recurring transfers</li>
</ul>

<h2>📁 Artifacts and Logs</h2>
<p>The following artifacts were generated during this transfer:</p>
<ul>
"""

        artifacts = metrics.get('artifacts', [])
        for artifact in artifacts:
            # Bug fix: the list item previously hardcoded "(unknown)" instead
            # of the artifact's basename.
            filename = Path(artifact).name
            content += f"    <li><code>{filename}</code> - {artifact}</li>\n"

        content += f"""</ul>

<h2>📈 Historical Context</h2>
<p>This report was automatically generated on {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')} by the DTO automated reporting system.</p>

<ac:structured-macro ac:name="info">
<ac:parameter ac:name="title">Next Steps</ac:parameter>
<ac:rich-text-body>
<ul>
    <li>Review performance metrics against SLOs</li>
    <li>Analyze any validation failures</li>
    <li>Implement recommended optimizations</li>
    <li>Update data lineage documentation</li>
</ul>
</ac:rich-text-body>
</ac:structured-macro>
"""

        return content

    def create_run_report(self, run_data: Dict[str, Any],
                          metrics: Dict[str, Any]) -> Optional[str]:
        """Create a Confluence page for a run report.

        Args:
            run_data: Run metadata used for the title and body.
            metrics: Collected metrics for the report body.

        Returns:
            The web URL of the created page, or None on any failure.
        """
        if not self.auth:
            print("❌ Confluence not configured")
            return None

        run_id = run_data.get('run_id', 'unknown')
        data_class = run_data.get('data_class', 'unknown')
        environment = run_data.get('environment', 'unknown')

        # All reports live under the shared parent page.
        parent_page_id = self.get_or_create_parent_page()
        if not parent_page_id:
            print("❌ Could not get parent page")
            return None

        try:
            report_content = self.generate_run_report_content(run_data, metrics)

            page_data = {
                "type": "page",
                "title": f"Transfer Report: {run_id} ({data_class} - {environment})",
                "space": {"key": self.space_key},
                "ancestors": [{"id": parent_page_id}],
                "body": {
                    "storage": {
                        "value": report_content,
                        "representation": "storage"
                    }
                }
            }

            response = requests.post(
                f"{self.server_url}/rest/api/content",
                auth=self.auth,
                headers=self.headers,
                json=page_data,
                timeout=self.REQUEST_TIMEOUT
            )

            if response.status_code == 200:
                page_info = response.json()
                page_id = page_info['id']
                page_url = f"{self.server_url}/wiki{page_info['_links']['webui']}"

                print(f"✅ Created Confluence report: {page_url}")
                return page_url
            print(f"❌ Failed to create report: {response.status_code} - {response.text}")
            return None

        except Exception as e:
            print(f"❌ Error creating report: {e}")
            return None

    def attach_file_to_page(self, page_id: str, file_path: str,
                            comment: str = "DTO transfer artifact") -> bool:
        """Attach a local file to a Confluence page.

        Args:
            page_id: Target Confluence page id.
            file_path: Path of the file to upload; must exist.
            comment: Attachment comment shown in Confluence.

        Returns:
            True when the upload succeeded, False otherwise.
        """
        if not self.auth or not Path(file_path).exists():
            return False

        try:
            with open(file_path, 'rb') as f:
                files = {
                    'file': (Path(file_path).name, f, 'application/octet-stream'),
                    'comment': (None, comment)
                }

                # Confluence rejects attachment POSTs without the
                # X-Atlassian-Token header (XSRF protection).
                response = requests.post(
                    f"{self.server_url}/rest/api/content/{page_id}/child/attachment",
                    auth=self.auth,
                    headers={'X-Atlassian-Token': 'no-check'},
                    files=files,
                    timeout=self.REQUEST_TIMEOUT
                )

            if response.status_code == 200:
                print(f"✅ Attached file to page: {Path(file_path).name}")
                return True
            print(f"❌ Failed to attach file: {response.status_code}")
            return False

        except Exception as e:
            print(f"❌ Error attaching file: {e}")
            return False

    def update_run_report(self, page_id: str, updated_metrics: Dict[str, Any]) -> bool:
        """Append updated metrics to an existing run report.

        Fetches the current page body and version, appends an "Updated
        Metrics" section, and publishes the page at version + 1.

        Args:
            page_id: Confluence page id of the report.
            updated_metrics: Key/value pairs to append to the page.

        Returns:
            True when the page was updated, False otherwise.
        """
        if not self.auth:
            return False

        try:
            # Fetch the current body and version number; the version must be
            # incremented on every PUT or Confluence rejects the update.
            response = requests.get(
                f"{self.server_url}/rest/api/content/{page_id}",
                auth=self.auth,
                headers=self.headers,
                params={'expand': 'body.storage,version'},
                timeout=self.REQUEST_TIMEOUT
            )

            if response.status_code != 200:
                print(f"❌ Failed to get page: {response.status_code}")
                return False

            page_data = response.json()
            current_version = page_data['version']['number']
            current_content = page_data['body']['storage']['value']

            metrics_update = f"""
<h2>📊 Updated Metrics</h2>
<p><em>Updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}</em></p>
<ul>
"""
            for key, value in updated_metrics.items():
                metrics_update += f"    <li><strong>{key}:</strong> {value}</li>\n"
            metrics_update += "</ul>\n"

            updated_content = current_content + metrics_update

            update_data = {
                "version": {"number": current_version + 1},
                "title": page_data['title'],
                "type": "page",
                "body": {
                    "storage": {
                        "value": updated_content,
                        "representation": "storage"
                    }
                }
            }

            update_response = requests.put(
                f"{self.server_url}/rest/api/content/{page_id}",
                auth=self.auth,
                headers=self.headers,
                json=update_data,
                timeout=self.REQUEST_TIMEOUT
            )

            if update_response.status_code == 200:
                print(f"✅ Updated Confluence report: {page_id}")
                return True
            print(f"❌ Failed to update report: {update_response.status_code}")
            return False

        except Exception as e:
            print(f"❌ Error updating report: {e}")
            return False
|
|
|
|
|
|
|
|
def test_confluence_integration():
    """Smoke-test Confluence integration by publishing a mock report.

    Builds a client from environment variables, verifies connectivity, and
    creates a report page from canned run data and metrics.

    Returns:
        True when a report page was created, False otherwise (including the
        expected case where no credentials are configured).
    """
    client = DTOConfluenceClient()

    if not client.test_connection():
        # Fix: previously printed a mojibake marker split across lines.
        print("❌ Confluence integration test failed (expected without proper credentials)")
        return False

    # Canned run metadata mirroring what the transfer pipeline emits.
    test_run_data = {
        'run_id': 'test-confluence-001',
        'job_id': 'test-job-001',
        'data_class': 'CLASS_A',
        'environment': 'production',
        'final_status': 'SUCCESS',
        'manifest_path': '/manifests/class_a/test.yaml'
    }

    test_metrics = {
        'average_throughput_mbps': 734.2,
        'total_duration_seconds': 5400,
        'data_size_gb': 150.0,
        'transfer_method': 'ssh+dd',
        'source_host': 'vast2.adapt.ai',
        'target_host': 'vast1.adapt.ai',
        'validation_results': {
            'files_validated': 145,
            'files_passed': 145,
            'checksum_algorithm': 'SHA-256'
        },
        'artifacts': [
            '/logs/test-confluence-001.log',
            '/reports/test-confluence-001.pdf',
            '/checksums/test-confluence-001.sha256'
        ]
    }

    report_url = client.create_run_report(test_run_data, test_metrics)

    if report_url:
        print(f"✅ Confluence integration test completed: {report_url}")
        return True
    print("❌ Failed to create test report")
    return False
|
|
|
|
|
if __name__ == "__main__": |
|
|
print("Testing DTO Confluence Integration...") |
|
|
print("=" * 50) |
|
|
|
|
|
test_confluence_integration() |
|
|
print("\nTo use Confluence integration, set these environment variables:") |
|
|
print("export CONFLUENCE_SERVER_URL=https://your-domain.atlassian.net/wiki") |
|
|
print("export CONFLUENCE_USERNAME=your-email@company.com") |
|
|
print("export CONFLUENCE_API_TOKEN=your-api-token") |