"""
Candidate Detail Page Module
Extracted from sud_promise_uab_theme.py
"""
import plotly.graph_objects as go
from datetime import datetime, timedelta
# Import UI constants
from const_ui import (
UAB_GREEN, UAB_DARK_GREEN, UAB_LIGHT_GREEN, UAB_ACCENT_TEAL, UAB_PALE_GREEN,
SCORE_THRESHOLD_HIGH, SCORE_THRESHOLD_MODERATE,
SMILES_DISPLAY_LENGTH, MAX_TARGET_DISPLAY,
get_status_badge, get_score_type_badge, get_evidence_stars, get_impact_badge
)
from const_config import INSTITUTION
def create_score_gauge(score: float, score_type: str, model_scores: dict = None) -> go.Figure:
    """Render the evidence score as a Plotly gauge indicator.

    Args:
        score: Evidence score in [0, 1] displayed on the gauge.
        score_type: Score provenance label shown in the gauge title.
        model_scores: Accepted for call-site symmetry; not used here.

    Returns:
        A ``go.Figure`` holding a single gauge+number+delta Indicator.
    """
    serif = "Times New Roman, serif"
    # Bar color reflects the score band (high / moderate / low).
    if score >= SCORE_THRESHOLD_HIGH:
        bar_color = UAB_DARK_GREEN
    elif score >= SCORE_THRESHOLD_MODERATE:
        bar_color = UAB_GREEN
    else:
        bar_color = UAB_LIGHT_GREEN
    gauge_conf = {
        'axis': {'range': [0, 1], 'tickwidth': 1, 'tickcolor': UAB_DARK_GREEN},
        'bar': {'color': bar_color},
        'bgcolor': "white",
        'borderwidth': 2,
        'bordercolor': UAB_DARK_GREEN,
        # Background bands: red-ish below 0.5, amber to 0.7, pale green above.
        'steps': [
            {'range': [0, 0.5], 'color': '#FFE5E5'},
            {'range': [0.5, 0.7], 'color': '#FFF4E5'},
            {'range': [0.7, 1], 'color': UAB_PALE_GREEN}
        ],
        # Red needle marking the high-evidence cutoff.
        'threshold': {
            'line': {'color': "red", 'width': 4},
            'thickness': 0.75,
            'value': 0.7
        }
    }
    indicator = go.Indicator(
        mode="gauge+number+delta",
        value=score,
        domain={'x': [0, 1], 'y': [0, 1]},
        title={'text': f"Evidence Score ({score_type})",
               'font': {'size': 16, 'family': serif, 'color': UAB_DARK_GREEN}},
        number={'font': {'size': 40, 'family': serif}},
        gauge=gauge_conf
    )
    fig = go.Figure(indicator)
    fig.update_layout(
        height=300,
        margin=dict(l=20, r=20, t=60, b=20),
        paper_bgcolor='white',
        font={'family': serif, 'color': UAB_DARK_GREEN}
    )
    return fig
def create_model_comparison_chart(model_scores: dict, score_type: str) -> go.Figure:
    """Compare individual model prediction scores in a bar chart.

    Args:
        model_scores: Mapping of model name -> prediction score in [0, 1].
        score_type: Only "Real" predictions carry per-model scores; any
            other value yields a placeholder figure.

    Returns:
        A bar chart ``go.Figure``, or an empty placeholder figure when no
        real per-model scores are available.
    """
    serif_font = {'family': 'Times New Roman, serif', 'color': UAB_DARK_GREEN}
    if score_type != "Real" or not model_scores:
        # Synthetic predictions have no per-model breakdown to show.
        placeholder = go.Figure()
        placeholder.update_layout(
            title="Model Scores (Not Available for Synthetic Predictions)",
            height=300,
            paper_bgcolor='white',
            font=serif_font
        )
        return placeholder
    names = list(model_scores.keys())
    values = list(model_scores.values())
    # Bucket each bar's color by score band; amber flags weak predictions.
    bar_colors = []
    for value in values:
        if value >= SCORE_THRESHOLD_HIGH:
            bar_colors.append(UAB_GREEN)
        elif value >= SCORE_THRESHOLD_MODERATE:
            bar_colors.append(UAB_LIGHT_GREEN)
        else:
            bar_colors.append('#FFA500')
    bars = go.Bar(
        x=names,
        y=values,
        marker_color=bar_colors,
        text=[f'{v:.3f}' for v in values],
        textposition='outside',
        textfont=dict(size=12, family="Times New Roman, serif", color=UAB_DARK_GREEN)
    )
    fig = go.Figure(data=[bars])
    fig.update_layout(
        title="Individual Model Predictions",
        xaxis_title="",
        yaxis_title="Prediction Score",
        height=300,
        margin=dict(l=40, r=20, t=50, b=80),
        plot_bgcolor=UAB_PALE_GREEN,
        paper_bgcolor='white',
        font=serif_font,
        yaxis=dict(range=[0, 1]),
        showlegend=False
    )
    # Reference lines for the decision and high-evidence thresholds.
    fig.add_hline(y=0.5, line_dash="dash", line_color="orange", annotation_text="Threshold 0.5")
    fig.add_hline(y=SCORE_THRESHOLD_HIGH, line_dash="dash", line_color=UAB_DARK_GREEN, annotation_text="Threshold 0.7")
    return fig
def create_evidence_timeline(candidate):
    """Plot the candidate's evidence score over time: baseline + project impacts.

    Args:
        candidate: Candidate record exposing ``score_type``, ``baseline_score``
            and ``attached_projects`` (each project with ``name``,
            ``added_date`` and ``impact_score``) — project-declared type.

    Returns:
        A ``go.Figure`` line/marker timeline of the cumulative evidence score.
    """
    timeline_data = []
    # Anchor the baseline 30 days before the first project, or a year back
    # when no projects are attached yet.
    start_date = candidate.attached_projects[0].added_date - timedelta(days=30) if candidate.attached_projects else datetime.now() - timedelta(days=365)
    if candidate.score_type == "Real":
        timeline_data.append({
            'date': start_date,
            'score': candidate.baseline_score,
            'label': f'ML/DL Ensemble Baseline: {candidate.baseline_score:.3f}',
            'color': UAB_DARK_GREEN,
            'impact': 0
        })
    else:
        # Synthetic score: neutral gray baseline with no supporting evidence.
        timeline_data.append({
            'date': start_date,
            'score': candidate.baseline_score,
            'label': 'Baseline (No Evidence)',
            'color': '#A0AEC0',
            'impact': 0
        })
    # Accumulate each project's impact, clamped to the displayable range.
    cumulative_score = candidate.baseline_score
    for project in candidate.attached_projects:
        cumulative_score += project.impact_score
        cumulative_score = max(0.20, min(0.95, cumulative_score))
        marker_color = UAB_GREEN if project.impact_score > 0 else '#DC2626'
        timeline_data.append({
            'date': project.added_date,
            'score': cumulative_score,
            'label': f'+ {project.name}' if project.impact_score > 0 else f'- {project.name}',
            'color': marker_color,
            'impact': project.impact_score,
            'impact_text': f'+{project.impact_score:.2f}' if project.impact_score > 0 else f'{project.impact_score:.2f}'
        })
    fig = go.Figure()
    # Each segment takes the color of its destination point (green gain,
    # red loss).
    for i in range(len(timeline_data) - 1):
        segment_color = timeline_data[i + 1]['color']
        fig.add_trace(go.Scatter(
            x=[timeline_data[i]['date'], timeline_data[i + 1]['date']],
            y=[timeline_data[i]['score'], timeline_data[i + 1]['score']],
            mode='lines',
            line=dict(color=segment_color, width=3),
            showlegend=False,
            hoverinfo='skip'
        ))
    # Markers with per-point hover labels.
    fig.add_trace(go.Scatter(
        x=[d['date'] for d in timeline_data],
        y=[d['score'] for d in timeline_data],
        mode='markers',
        marker=dict(
            size=12,
            color=[d['color'] for d in timeline_data],
            line=dict(width=2, color='white')
        ),
        text=[d['label'] for d in timeline_data],
        # BUGFIX: this template literal was split across physical lines in the
        # source, which made the module unparseable; rejoined as one string.
        hovertemplate='%{text}<br>Score: %{y:.3f}<br>%{x|%b %d, %Y}<extra></extra>',
        showlegend=False
    ))
    # NOTE(review): the layout below was reconstructed from garbled source;
    # styling mirrors the other figures in this module — confirm against the
    # original file.
    fig.update_layout(
        title="Evidence Score Evolution",
        yaxis_title="Evidence Score",
        height=300,
        margin=dict(l=40, r=20, t=50, b=40),
        plot_bgcolor='white',
        paper_bgcolor='white',
        font={'family': 'Times New Roman, serif', 'color': UAB_DARK_GREEN},
        yaxis=dict(range=[0, 1]),
        showlegend=False
    )
    return fig
Please select a candidate
", None, None, None, "" candidate_name = candidate_selection.split(" (Score:")[0].strip() category_name = category_selection.split(" (")[0].strip() if category_selection else None if category_name: candidate = next((c for c in CANDIDATES if c.drug_name == candidate_name and c.target_sud_subtype == category_name), None) else: candidate = next((c for c in CANDIDATES if c.drug_name == candidate_name), None) if not candidate: return "Candidate not found
", None, None, None, "" stars = get_evidence_stars(candidate.evidence_score) status_badge = get_status_badge(candidate.stage) score_type_badge = get_score_type_badge(candidate.score_type) timeline_fig = create_evidence_timeline(candidate) gauge_fig = create_score_gauge(candidate.evidence_score, candidate.score_type, candidate.model_scores) model_comparison_fig = create_model_comparison_chart(candidate.model_scores, candidate.score_type) # Evidence Evolution section html_evolution = "" if candidate.score_type == "Real": html_evolution = f"""ML/DL Ensemble Baseline: {candidate.baseline_score:.3f} 🤖
Current Score (with Projects): {candidate.evidence_score:.3f}
Evidence Contribution: +{candidate.evidence_score - candidate.baseline_score:.3f} ({((candidate.evidence_score - candidate.baseline_score) / candidate.baseline_score * 100):.1f}%)
Starting from ML/DL ensemble prediction, additional evidence projects further refine confidence.
Baseline Score: {candidate.baseline_score:.2f} (no evidence)
Current Score: {candidate.evidence_score:.2f}
Total Improvement: +{candidate.evidence_score - candidate.baseline_score:.2f} ({((candidate.evidence_score - candidate.baseline_score) / candidate.baseline_score * 100):.1f}%)
Repositioning Candidate for {candidate.target_sud_subtype}
{INSTITUTION} - SUD-PROMISE
Current Indication: {candidate.current_indication}
Target SUD: {candidate.target_sud_subtype}
Disease ID: {candidate.disease_id if candidate.disease_id else 'Not Mapped'}
Mechanism of Action: {candidate.mechanism}
SMILES: {candidate.smiles[:SMILES_DISPLAY_LENGTH]}{'...' if len(candidate.smiles) > SMILES_DISPLAY_LENGTH else ''}
Protein Targets: {', '.join(candidate.protein_targets[:MAX_TARGET_DISPLAY]) if candidate.protein_targets else 'Not specified'}{' (...)' if len(candidate.protein_targets) > MAX_TARGET_DISPLAY else ''}
Projects contributing to prediction confidence
""" for project in candidate.attached_projects: impact_badge = get_impact_badge(project.impact_score) border_color = UAB_GREEN if project.impact_score > 0 else '#DC2626' html_after_plot += f"""Type: {project.project_type}
Sample Size: {project.sample_size:,} patients
Status: {project.status}
Added: {format_date_ago(project.added_date)}
"{project.summary}"