Spaces:
Running
Running
| """ | |
| ## src/app/gradio_app.py | |
| # CHANGE LOG | |
| # 2026-04-12 | |
| - Evidence not recomputing on historical upload | |
| - Key insights parsed week wrong message run_count current week / number of weeks | |
| - positioning_headline_building - refine message, not taking into account historical data, or first run ever | |
| - PeriodComparison: couldn't remove as still used by goal_process (review against context) | |
| - Visualisation agent not generating charts for past weeks - FIXED | |
| - goal_progress re-compute after new uploads | |
| - set_runner_goal (SnapshotService.recompute_week(...) exposes goal_progress) - recompute week against new Goal | |
| - goal_services (compute_goal_progress), goal_progress_service (call goal_services), goal_trajectory_service (calls GoalTrajectoryEngine.compute()) seems all the same thing? | |
| - intelligence_step._run_brief calling goal_progress_service.compute() - re-use context.goal_progress???? | |
| - wrap_set_goal recomputing goal_progress calling goal_progress_service.compute() - needs review | |
| - orchestrator.get_intelligence_snapshot disabled fallback to latest_snapshot if available | |
| - context.structure_status NOT being updated - run_dist_km >= (0.8 * session.target_distance_km) rule not being met | |
| - orch.analysis_repo.get_last_analysis() - WHY this? review | |
- upload_runs ❌ Error: Orchestrator failed to process runs. - FIXED (missing return)
| - on change week not changing UI after upload new runs / working on app load / weekly structure weekday run not correct (last week view) - FIXED | |
| - upload_run while on "last week" view mode keeps Home UI on current week - FIXED | |
| - upload of runs for an existent week replaces the existent week_snapshot (id) with a new one weekly_builder.build(), snapshot_services.recompute_week() | |
| """ | |
| import sys | |
| import os | |
| import asyncio | |
| from pathlib import Path | |
| from datetime import date, timedelta | |
| # Add the 'src' directory to sys.path to support both direct run and 'python -m' | |
| src_path = str(Path(__file__).parent.parent) | |
| if src_path not in sys.path: | |
| sys.path.insert(0, src_path) | |
| import gradio as gr | |
| from _app.presentation.ui_text import UI_TEXT, get_text | |
| from gradio import themes | |
| from typing import List, Dict, Any, Tuple, Optional | |
| import uuid | |
| from observability import components as obs_components | |
| from ingestion.loader import load_runs_from_uploaded_files, load_runs_from_folder | |
| from agents.orchestrator import get_orchestrator, RunnerOrchestrator | |
| from engines.period_comparison_engine import compute_period_comparison | |
| from services.performance_card_service import CardViewModel | |
| import logging | |
| from observability import logger as obs_logger | |
| from _app.ui.coaching_helpers import is_current_week, format_positioning_metrics, get_baseline_aware_target, interpret_week | |
| # Configure logging | |
| logger = logging.getLogger(__name__) | |
| # --- UI HELPERS --- | |
def format_pace(seconds_per_km: float) -> str:
    """Render a pace given in seconds-per-km as "M:SS /km"; "N/A" for missing or non-positive values."""
    if not seconds_per_km or seconds_per_km <= 0:
        return "N/A"
    minutes, seconds = divmod(int(seconds_per_km), 60)
    return f"{minutes}:{seconds:02d} /km"
| # --- UI Localization Text moved to domain.i18n --- | |
def format_plan_structured(
    plan_dict: Dict[str, Any], language: str = "en"
) -> Tuple[str, str, str, str, str, str]:
    """Render a structured plan dict into six Markdown fragments.

    Returns (safety_banner, plan_hero, adjustments, notes, plan_body,
    week_summary). All six are empty strings when plan_dict is None or
    not dict-shaped.

    BUG FIX: repaired mojibake in user-facing strings ("AVISO DE SEGURANΓA"
    -> "AVISO DE SEGURANÇA", "ProgressΓ£o" -> "Progressão", the warning
    emoji, and the en dash in the total range); removed the unused
    RunnerProfile import.
    """
    if plan_dict is None:
        return "", "", "", "", "", ""
    t = UI_TEXT.get(language, UI_TEXT["en"])
    is_pt = language == "pt-BR"
    # The plan may arrive wrapped ({"plan": {...}}) or bare.
    plan = plan_dict.get("plan", plan_dict) if isinstance(plan_dict, dict) else {}
    if not isinstance(plan, dict):
        return "", "", "", "", "", ""
    # --- BASELINE-AWARE TRAINING PLAN (UI OVERRIDE) ---
    # When the caller also passes the orchestrator, look up the runner's
    # baseline weekly km so weekly totals can be adjusted for rebuild weeks.
    baseline = 0
    if isinstance(plan_dict, dict) and "orch" in plan_dict:
        orch = plan_dict["orch"]
        import config

        profile = orch.runner_repo.get_runner_profile(uuid.UUID(config.DEFAULT_RUNNER_ID))
        if profile:
            baseline = profile.baseline_weekly_km
    week_summary = str(plan.get("week_summary", "")).strip()
    focus = str(plan.get("focus", "")).strip()
    safety = plan.get("safety_warning", {}) or {}
    level = str(safety.get("level", "NONE")).strip().upper()
    message = str(safety.get("message", "")).strip()
    safety_banner_md = ""
    if level and level != "NONE":
        risk_label = "RISCO" if is_pt else "RISK"
        safety_label = "AVISO DE SEGURANÇA" if is_pt else "SAFETY WARNING"
        safety_banner_md = f"⚠️ {safety_label}: {level} {risk_label}"
        if message:
            safety_banner_md = f"{safety_banner_md}\n\n{message}"
    plan_hero_md = ""
    if focus or week_summary:
        focus_label = "Foco desta semana:" if is_pt else "This week's focus:"
        summary_label = "Resumo da semana:" if is_pt else "Week summary:"
        plan_hero_md = f"**{focus_label}** {focus}\n\n**{summary_label}** {week_summary}".strip()
    adjustments_md = ""
    adjustments = plan.get("recommended_adjustments") or []
    if isinstance(adjustments, list) and adjustments:
        adj_label = "### Ajustes Recomendados" if is_pt else "### Recommended Adjustments"
        adjustments_md = adj_label
        for item in adjustments:
            text = str(item).strip()
            if text:
                adjustments_md += f"\n- {text}"
    notes_md = ""
    notes = plan.get("notes") or []
    if isinstance(notes, list) and notes:
        notes_label = "### Notas" if is_pt else "### Notes"
        notes_md = notes_label
        for item in notes:
            text = str(item).strip()
            if text:
                notes_md += f"\n- {text}"
    weeks = plan.get("weeks") or []
    body_lines: List[str] = []
    if isinstance(weeks, list):
        for week in weeks:
            if not isinstance(week, dict):
                continue
            week_num = week.get("week", "")
            week_label = "Semana" if is_pt else "Week"
            body_lines.append(f"### {week_label} {week_num}")
            body_lines.append("")
            day_header = "Dia" if is_pt else "Day"
            activity_header = "Atividade" if is_pt else "Activity"
            body_lines.append(f"| {day_header} | {activity_header} |")
            body_lines.append("| :--- | :--- |")
            days = week.get("days") or []
            if isinstance(days, list):
                for day in days:
                    if not isinstance(day, dict):
                        continue
                    day_name = str(day.get("day", "")).strip()
                    # Optionally translate day names if the LLM didn't
                    # (common with schema constraints).
                    activity = str(day.get("activity", "")).strip()
                    body_lines.append(f"| **{day_name}** | {activity} |")
            total_km = week.get("total_km", "")
            progression = str(week.get("progression", "")).strip()
            # Application of the baseline-aware rule for display only.
            display_total = total_km
            if baseline and total_km != "":
                try:
                    target = get_baseline_aware_target(float(total_km), baseline)
                    if target > float(total_km):
                        # Rebuild scenario detected: show a range instead.
                        display_total = f"~{target - 2:.1f}–{target:.1f}"
                        # Override the progression message when the planned
                        # volume is well below baseline.
                        if float(total_km) < baseline * 0.8:
                            progression = t.get("rebuilding_consistency_msg", progression)
                except (ValueError, TypeError):
                    pass
            if display_total != "":
                body_lines.append("")
                body_lines.append(f"**Total:** {display_total} km")
            if progression:
                body_lines.append("")
                prog_label = "Progressão:" if is_pt else "Progression:"
                body_lines.append(f"**{prog_label}** {progression}")
            body_lines.append("")
    plan_body_md = "\n".join([line for line in body_lines if line is not None]).strip()
    return safety_banner_md, plan_hero_md, adjustments_md, notes_md, plan_body_md, week_summary
def render_next_run(recommendation, language="en", visible=True):
    """Formats the recommendation for the Next Run Card.

    `recommendation` may be an object with attributes or a plain dict; each
    field is looked up under both naming schemes (e.g. "training_focus" and
    "focus").

    BUG FIX: the original used getattr(rec, "training_focus" or "focus", None)
    — the expression ("training_focus" or "focus") always evaluates to
    "training_focus", so the "focus" attribute was never tried — and it then
    called .get() unconditionally, which raises AttributeError for non-dict
    recommendation objects whenever the attribute value is falsy.
    """
    t = UI_TEXT.get(language, UI_TEXT["en"])
    if not recommendation:
        return gr.update(value=t.get("next_run.upload_runs_prompt", ""), visible=visible)

    def _pick(*names):
        # Try attribute access first, then dict lookup, for each candidate name.
        for name in names:
            value = getattr(recommendation, name, None)
            if value is None and isinstance(recommendation, dict):
                value = recommendation.get(name)
            if value:
                return value
        return None

    focus = _pick("training_focus", "focus")
    session = _pick("training_type", "session_type")
    explanation = _pick("training_why", "description")
    if not session:
        return gr.update(value=t.get("next_run.set_goal_prompt", ""), visible=visible)
    text = f"""
**{t.get("next_run.session", "Suggested Session")}**
{session}

**{t.get("next_run.focus", "Focus")}**
{focus}

**{t.get("next_run.why", "Why")}**
{explanation}
"""
    return gr.update(value=text, visible=visible)
async def create_orchestrator():
    """Creates and initializes the RunnerOrchestrator orchestrator.

    When persistence is enabled, performs a warm-up run with no new data so
    persisted state is loaded before the UI uses the orchestrator.

    BUG FIX: the original wrote
        await orchestrator.run([]) if config.is_persistence_enabled() else None
    which parses as `await (expr if cond else None)` and therefore raises
    TypeError ("object NoneType can't be used in 'await' expression") whenever
    persistence is disabled.
    """
    import config

    orchestrator = await get_orchestrator(use_RunnerOrchestrator=True)
    if config.is_persistence_enabled():
        await orchestrator.run([])
    return orchestrator
async def get_runner_profile(orchestrator: "RunnerOrchestrator") -> Optional[Dict[str, Any]]:
    """Return the runner profile from the orchestrator's last DTO response.

    Returns None when storage is disabled (no runner repo) or when no profile
    is present on the DTO response.

    Cleanup: removed a dead `import config` (the module was never used here)
    and quoted the parameter annotation as a forward reference — backward
    compatible for all callers.
    """
    if not orchestrator.runner_repo:
        return None
    profile = orchestrator.dto_response.profile
    return profile if profile else None
async def save_runner_profile(
    orchestrator: RunnerOrchestrator,
    display_name,
    age,
    experience,
    notes,
    baseline,
    gender,
):
    """Persist the runner profile, keeping created_at when one already exists.

    Returns a (status_message, profile_dict_or_None) pair for the UI.
    """
    if not orchestrator.runner_repo:
        return "Storage disabled", None
    import config
    from domain.runner.profile import RunnerProfile
    from datetime import datetime

    runner_id = uuid.UUID(config.DEFAULT_RUNNER_ID)
    existing = orchestrator.runner_repo.get_runner_profile(runner_id)
    profile = RunnerProfile(
        runner_id=runner_id,
        runner_display_name=display_name or None,
        age=int(age) if age else None,
        experience_level=experience if experience else None,
        injury_history_notes=notes or None,
        # Preserve the original creation timestamp on updates.
        created_at=existing.created_at if existing else datetime.now(),
        updated_at=datetime.now(),
        baseline_weekly_km=float(baseline) if baseline else None,
        gender=gender if gender else None,
    )
    try:
        orchestrator.runner_repo.save(profile)
        return "Profile saved successfully!", profile.model_dump()
    except Exception as e:
        return f"Error: {str(e)}", None
async def set_runner_goal(
    orchestrator: RunnerOrchestrator, goal_type, target_value, target_unit, target_date
):
    """Create/replace the runner's goal and invalidate recent week snapshots.

    `target_date` is an optional ISO-format string; unparseable values are
    silently treated as "no date". Returns a (status_message,
    goal_dict_or_None) pair for the UI.

    BUG FIX: the date-parse `except:` was a bare except, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to the exceptions fromisoformat
    actually raises. Also removed the unused `Goal` import.
    """
    if not orchestrator.goal_repo:
        return "Storage disabled", None
    import config
    from datetime import datetime

    try:
        dt = None
        if target_date:
            try:
                dt = datetime.fromisoformat(target_date)
            except (TypeError, ValueError):
                pass
        goal = orchestrator.goal_service.create_goal(
            runner_id=uuid.UUID(config.DEFAULT_RUNNER_ID),
            goal_type=goal_type,
            target_value=float(target_value) if target_value else 0.0,
            unit=target_unit,
            target_date=dt,
        )
        # Invalidate the most recent weeks so the next UI fetch rebuilds their
        # goal_progress against the new goal.
        latest_weeks = orchestrator.weekly_repo.get_last_n(4) if orchestrator.weekly_repo else []
        weeks = [w.week_start_date for w in latest_weeks]
        if weeks:
            orchestrator.snapshot_service.invalidate_weeks(str(uuid.UUID(config.DEFAULT_RUNNER_ID)), weeks)
        return "Goal updated!", goal.model_dump()
    except Exception as e:
        return f"Error: {str(e)}", None
def has_analysis_context(orch: RunnerOrchestrator) -> bool:
    """Helper to check if any analysis context (persisted or in-memory) exists."""
    if not orch:
        return False
    # The in-memory feature store reflects the current session; check it first.
    store = orch.feature_store
    if store and store.get_all_features():
        return True
    # Otherwise fall back to persisted storage, when enabled.
    repo = orch.analysis_repo
    if not repo:
        return False
    try:
        last = repo.get_last_analysis()
        return bool(last and int(last.run_summary['avg_runs_per_week']) > 0)
    except Exception:
        # Best-effort: any storage failure just means "no context".
        return False
async def process_runs_with_orchestrator(
    runs: List[dict], orchestrator: RunnerOrchestrator, language: str = "en"
):
    """
    Compute features, weekly trends, insights, and plan for a list of runs using the provided orchestrator.

    Returns a dict with keys num_runs / weeks / primary_lever /
    details_insights / plan / charts / risk_assessment on success, or
    {"error": message} on failure or empty input.
    """
    t = UI_TEXT.get(language, UI_TEXT["en"])
    if not runs:
        return {"error": t.get("error_no_runs", "No runs found.")}
    # Run orchestration
    response = await orchestrator.run(runs, language=language)
    if response:
        # Extract metadata for reassurance
        num_runs = len(runs)
        # Calculate weeks spanned by the uploaded runs (from feature dates).
        weeks = 0
        if runs:
            features = getattr(response, "features", [])
            if features:
                dates = [f.get("start_time") for f in features if f.get("start_time")]
                if dates:
                    from datetime import datetime
                    dt_dates = []
                    for d in dates:
                        if isinstance(d, datetime):
                            dt_dates.append(d)
                        elif isinstance(d, str):
                            try:
                                # Normalize trailing "Z" to an explicit UTC offset.
                                dt_dates.append(datetime.fromisoformat(d.replace("Z", "+00:00")))
                            # NOTE(review): bare except — should be narrowed to
                            # (TypeError, ValueError); kept as-is here.
                            except:
                                pass
                    if dt_dates:
                        # Inclusive week count over the date span, minimum 1.
                        delta = max(dt_dates) - min(dt_dates)
                        weeks = max(1, (delta.days // 7) + 1)
        # Format insights: Primary Lever vs others
        raw_insights = getattr(response, "insights", {})
        # Primary Lever
        lever_data = raw_insights.get("primary_lever", {})
        lever_msg = lever_data.get("message", t["insights_no_primary_lever"])
        lever_constraint = lever_data.get("constraint")
        primary_lever = f"{lever_msg}"
        if lever_constraint:
            primary_lever += f"\n\n**{t['insights_constraint_label']}:** {lever_constraint}"
        # Gather key observations and risk signal for details
        risk_data = raw_insights.get("risk_signal", {})
        risk_msg = risk_data.get("message", t["insights_no_risk"])
        risk_evidence = risk_data.get("evidence", [])
        risk_text = f"{risk_msg}"
        if risk_evidence:
            evidence_label = t["insights_evidence_label"]
            risk_text += f"\n\n**{evidence_label}:**\n" + "\n".join(
                [f"- {e}" for e in risk_evidence]
            )
        obs = raw_insights.get("key_observations", [])
        obs_items = []
        for o in obs:
            msg = o.get("message")
            if msg:
                item = f"- {msg}"
                ev = o.get("evidence", [])
                if ev:
                    evidence_label = t["insights_evidence_label"]
                    item += f" ({evidence_label}: " + ", ".join(ev) + ")"
                obs_items.append(item)
        obs_text = "\n".join(obs_items) if obs_items else t["insights_no_observations"]
        summary_msg = (raw_insights.get("summary") or {}).get("message", "")
        risk_heading = t["insights_risk_signal_heading"]
        obs_heading = t["insights_key_observations_heading"]
        details_text = f"### {risk_heading}\n{risk_text}\n\n### {obs_heading}\n{obs_text}"
        if summary_msg:
            details_text += f"\n\n---\n*{summary_msg}*"
        # Localized timestamp
        from datetime import datetime
        timestamp_lbl = t["insights_timestamp_label"]
        # NOTE(review): this stamps local time but labels it "UTC" — confirm
        # whether datetime.utcnow()/now(tz=UTC) was intended.
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S UTC")
        details_text += f"\n\n<div class='muted' style='font-size: 0.8rem;'>{timestamp_lbl} {current_time}</div>"
        return {
            "num_runs": num_runs,
            "weeks": weeks,
            "primary_lever": primary_lever,
            "details_insights": details_text,
            "plan": getattr(response, "plan", None),
            "charts": getattr(response, "charts", None),
            "risk_assessment": getattr(response, "risk_assessment", None),
        }
    # NOTE(review): this branch is unreachable — it is only evaluated when
    # `response` is falsy, so `response and ...` can never be true. The error
    # check likely belongs BEFORE the `if response:` success path.
    elif response and "error" in response:
        return {"error": response["error"]}
    else:
        return {"error": "Orchestrator failed to process runs."}
async def _empty_results(orch, charts, selected: str = "id_analyse", language: str = "en"):
    """Return the full 62-item tuple matching the outputs list (including positioning).

    (The docstring previously said 45 items; `reset_ui` documents and relies
    on 62: 18 analysis items + 14 home-dashboard items + 14 coaching/group
    items + 2 plots + 16 positioning items from _format_positioning_dashboard.)
    All components are reset to their blank/pending state.
    """
    t = UI_TEXT.get(language, UI_TEXT["en"])
    risk_heading = t["insights_risk_signal_heading"]
    lever_heading = t["insights_primary_lever_heading"]
    pending = t["insights_analysis_pending"]
    raw_default = t["insights_raw_default"]
    plan_pending = t["plan_pending"]
    has_ctx = has_analysis_context(orch)
    # Chat stays interactive only when some analysis context exists; the
    # "no context" indicator is shown in the opposite case.
    indicator_val = t["chat_indicator_no"]
    indicator_visible = not has_ctx
    # Optional: If persisted exists, show subtle info
    #if orch and hasattr(orch, "analysis_repo") and orch.analysis_repo:
    #    try:
    #        last = orch.analysis_repo.get_last_analysis()
    #        if last:
    #            ts = last.created_at.strftime("%Y-%m-%d %H:%M")
    #            indicator_val = f"{t.get('insights_timestamp_label', 'Analysis from')} {ts}"
    #            indicator_visible = True
    #    except Exception:
    #        pass
    return (
        gr.update(value="", visible=False),  # reassurance_msg
        gr.update(value=f"### {lever_heading}\n{pending}"),  # lever_box
        gr.update(value=f"### {risk_heading}\n{raw_default}"),  # details_box
        gr.update(value="", visible=False),  # safety_banner
        gr.update(value="", visible=False),  # plan_hero
        gr.update(value=""),  # adjustments_box
        gr.update(value=""),  # notes_box
        gr.update(value=plan_pending),  # plan_body
        None,  # pace_plot
        None,  # hr_plot
        "N/A",  # risk_level_box
        gr.update(value="", interactive=has_ctx),  # msg
        gr.update(value=indicator_val, visible=indicator_visible),  # context_indicator
        orch,  # orchestrator_state
        charts,  # charts_state
        gr.update(selected=selected),  # tabs
        selected,  # selected_tab
        gr.update(
            choices=[(t["current_week_label"], "current"), (t["last_week_label"], "last")],
            value="current",
        ),  # home_week_toggle
        # Home dashboard updates (14 values)
        "",  # home_week_range
        "",  # home_comparison_table
        "",  # home_brief_content
        "",  # home_focus_content
        #"",  # home_snapshot_md
        "",  # home_snapshot_metrics
        "",  # home_structure_metrics
        gr.update(value=""),  # home_goal_summary
        gr.update(value=""),  # home_goal_status
        gr.update(value=0),  # home_goal_progress_bar
        gr.update(value="", visible=False),  # home_card_html
        None,  # home_card_state
        gr.update(visible=False),  # btn_download_card
        #gr.update(value=t["sec_evolution"]),  # home_evolution_md
        # Next Run Card + Coaching Components + Groups
        gr.update(value=t.get("next_run.upload_runs_prompt", ""), visible=False),  # home_next_run_card
        gr.update(value="", visible=False),  # home_coach_insight_md
        gr.update(value="", visible=False),  # home_health_signal_md
        gr.update(value="", visible=False),  # home_goal_trajectory_md
        gr.update(value="", visible=False),  # home_evidence_md
        gr.update(value="", visible=False),  # home_recommendation_md
        gr.update(visible=False),  # home_insights_group
        gr.update(visible=False),  # home_evidence_group
        gr.update(visible=False),  # home_rec_group
        gr.update(visible=False),  # home_goal_group
        gr.update(visible=False),  # home_structure_group
        gr.update(visible=False),  # performance_card_group
        gr.update(visible=False),  # home_next_run_header
        gr.update(visible=False),  # your_week_group
        gr.update(value=None),  # pace_plot (home)
        gr.update(value=None),  # hr_plot (home)
        # Positioning updates (16 values)
        *(await _format_positioning_dashboard(orch, language=language)),
    )
async def reset_ui(orch, language="en"):
    """Resets the UI and the orchestrator state."""
    if orch:
        orch.reset()
    # _empty_results yields the 62 analysis/home/positioning values that sit
    # between the two fixed ends of the reset_btn.click outputs list:
    #   1 upload, 2 run_btn, 3-64 (the 62 items), 65 download file,
    #   66 tab_home, 67 tab_intelligence, 68 tab_analyse, 69 chatbot.
    middle = await _empty_results(orch, {}, selected="id_analyse", language=language)
    head = (
        gr.update(value=None),                              # 1. upload
        gr.update(interactive=False, variant="secondary"),  # 2. run_btn
    )
    tail = (
        gr.update(value=None, visible=False),  # 65. home_card_download_file
        gr.update(visible=False),              # 66. tab_home
        gr.update(visible=False),              # 67. tab_intelligence
        gr.update(visible=True),               # 68. tab_analyse
        gr.update(value=[]),                   # 69. chatbot
    )
    return head + tuple(middle) + tail
def _split_plan_sections(plan_md: str):
    """Split a free-form Markdown plan into its display sections.

    Returns (safety_line, adjustments, notes, formatted_body, week_summary).
    The body is additionally normalized: "Week N:" sections are re-rendered
    with a Day/Activity Markdown table plus Total/Progression footers.
    """
    import re
    text = (plan_md or "").strip()
    if not text:
        return "", "", "", "", ""
    lines = text.splitlines()
    # Pull out (at most) the first safety line and blank it in place.
    safety_keywords = ("safety warning", "high risk", "risk detected")
    safety_line = ""
    for i, line in enumerate(lines):
        if any(k in line.lower() for k in safety_keywords):
            safety_line = line.strip()
            lines[i] = ""
            break
    # Separate "Recommended adjustments:" and "Notes:" bullet blocks from the
    # remaining body. A blank line or non-bullet line ends either block.
    adjustments_lines = []
    notes_lines = []
    remaining = []
    in_adjustments = False
    in_notes_block = False
    for line in lines:
        lower = line.lower()
        if "recommended adjustments:" in lower:
            in_adjustments = True
            adjustments_lines.append(line.strip())
            continue
        if in_adjustments:
            if not line.strip():
                in_adjustments = False
                continue
            if line.lstrip().startswith(("-", "*")):
                adjustments_lines.append(line.strip())
                continue
            in_adjustments = False
        if lower.strip() == "notes:":
            in_notes_block = True
            continue
        if in_notes_block:
            if not line.strip():
                in_notes_block = False
                continue
            if line.lstrip().startswith(("-", "*")):
                notes_lines.append(line.strip())
                continue
            in_notes_block = False
        # Stray single-line "note:" entries also go to the notes section.
        if "note:" in lower:
            notes_lines.append(line.strip())
            continue
        remaining.append(line)
    adjustments = "\n".join([l for l in adjustments_lines if l]).strip()
    notes = "\n".join([l for l in notes_lines if l]).strip()
    body = "\n".join([l for l in remaining if l.strip()]).strip()
    # NOTE(review): compiled WITHOUT re.MULTILINE, so finditer() over the
    # joined body can only match a header at the very start of the string;
    # re.MULTILINE was likely intended — confirm. (The per-line match() calls
    # below are unaffected.)
    week_header_re = re.compile(r"^Week\s+(\d+):\s*$", re.IGNORECASE)
    formatted_body = body
    week_matches = list(week_header_re.finditer(body))
    week_summary = ""
    if week_matches:
        body_lines = body.splitlines()
        # Collect (line_index, week_number) for every week header.
        week_starts = []
        for idx, line in enumerate(body_lines):
            m = week_header_re.match(line.strip())
            if m:
                week_starts.append((idx, m.group(1)))
        if week_starts:
            # Sentinel end marker so each week's chunk has an end index.
            week_starts.append((len(body_lines), None))
            out_lines = []
            for i in range(len(week_starts) - 1):
                start_idx, week_num = week_starts[i]
                end_idx, _ = week_starts[i + 1]
                out_lines.append(f"### Week {week_num}")
                chunk_lines = body_lines[start_idx + 1 : end_idx]  # Define chunk_lines here
                # Parse chunk to build Calendar Table
                table_data = []  # List of (Day, Activity)
                total_line = ""
                progression_text = ""
                days_order = {"mon": 0, "tue": 1, "wed": 2, "thu": 3, "fri": 4, "sat": 5, "sun": 6}
                day_regex = re.compile(
                    r"^(?:[* -]*)(Mon|Tue|Wed|Thu|Fri|Sat|Sun)(?:(?:day)?)(?:[:\.]?)\s*(.*)",
                    re.IGNORECASE,
                )
                # Pre-process lines to handle both list and table formats
                for line in chunk_lines:
                    clean = line.strip()
                    if not clean:
                        continue
                    # 1. Check for Progression
                    if "progression:" in clean.lower():
                        if "|" in clean:  # Table row
                            parts = [p.strip() for p in clean.split("|") if p.strip()]
                            if len(parts) >= 2:
                                progression_text = parts[1].replace("**", "").strip()
                        else:  # List/Text line
                            # Remove "Progression:" prefix
                            progression_text = re.sub(
                                r"^[* -]*Progression:?\s*", "", clean, flags=re.IGNORECASE
                            ).strip()
                        continue
                    # 2. Check for Total
                    if "total:" in clean.lower():
                        if "|" in clean:
                            parts = [p.strip() for p in clean.split("|") if p.strip()]
                            if len(parts) >= 2:
                                total_line = parts[1].replace("**", "").strip()
                        else:
                            total_line = re.sub(
                                r"^[* -]*Total:?\s*", "", clean, flags=re.IGNORECASE
                            ).strip()
                        continue
                    # 3. Check for Day (List format)
                    m = day_regex.match(clean)
                    if m:
                        day_name = m.group(1).title()
                        activity = m.group(2).strip()
                        # Remove leading bullets/bold from activity if accidentally captured
                        activity = re.sub(r"^[* -]+", "", activity).strip()
                        table_data.append((day_name, activity))
                        continue
                    # 4. Check for Day (Table format: | Mon | Run... |)
                    if "|" in clean:
                        parts = [p.strip() for p in clean.split("|") if p.strip()]
                        if len(parts) >= 2:
                            day_col = parts[0].replace("**", "").strip()
                            # Check if first col is a day
                            if day_col[:3].lower() in days_order:
                                activity = parts[1].strip()
                                table_data.append((day_col.title(), activity))
                                continue
                # Build Markdown Table if data exists
                if table_data:
                    out_lines.append("| Day | Activity |")
                    out_lines.append("| :--- | :--- |")
                    # Sort by day if needed, but assuming input order is correct.
                    # Just ensure we don't have duplicates or empty rows.
                    for day, activity in table_data:
                        out_lines.append(f"| **{day}** | {activity} |")
                else:
                    # Fallback: Just dump the chunk lines if parsing failed
                    out_lines.extend([l for l in chunk_lines if l.strip()])
                if total_line:
                    out_lines.append(f"\n**Total:** {total_line}")
                if progression_text:
                    out_lines.append(f"\n**Progression:** {progression_text}")
                if i < len(week_starts) - 2:
                    out_lines.append("")  # Add a blank line between weeks
            formatted_body = "\n".join(out_lines).strip()
        # week_summary = first bullet after the first week header (stopping at
        # the next week header).
        week1_start = week_starts[0][0] if week_starts else None
        if week1_start is not None:
            for line in body_lines[week1_start + 1 :]:
                if line.lstrip().startswith(("-", "*")):
                    week_summary = line.lstrip("-* ").strip()
                    break
                if week_header_re.match(line.strip()):
                    break
    return safety_line, adjustments, notes, formatted_body, week_summary
| def _extract_plan_json(text: str) -> Optional[Dict[str, Any]]: | |
| import json | |
| if not text: | |
| return None | |
| start = text.find("{") | |
| end = text.rfind("}") | |
| if start == -1 or end == -1 or end <= start: | |
| return None | |
| candidate = text[start : end + 1] | |
| try: | |
| data = json.loads(candidate) | |
| except Exception: | |
| return None | |
| if not isinstance(data, dict): | |
| return None | |
| if "plan" in data and isinstance(data["plan"], dict): | |
| return data["plan"] | |
| if "weeks" in data: | |
| return data | |
| return None | |
def format_week_range(monday_date: date, language="en"):
    """Formats a week range (Monday to Sunday) for display.

    Returns "DD/MM - DD/MM" for pt-BR and "MMM DD - MMM DD" otherwise.

    Cleanup: removed a dead `t = UI_TEXT.get(...)` lookup — the translation
    table was fetched but never used.
    """
    sunday_date = monday_date + timedelta(days=6)
    # Format dates based on language
    if language == "pt-BR":
        return f"{monday_date.strftime('%d/%m')} - {sunday_date.strftime('%d/%m')}"
    # Default to en
    return f"{monday_date.strftime('%b %d')} - {sunday_date.strftime('%b %d')}"
def download_performance_card(card_view_model: Optional[CardViewModel]):
    """Export the performance card to PNG; returns the exporter's result, or None without a card."""
    if not card_view_model:
        return None
    # Imported lazily so the exporter (and any heavy image deps) load on demand.
    from infrastructure.exporters.performance_card_exporter import PerformanceCardExporter

    return PerformanceCardExporter().export_to_png(card_view_model)
def get_start_week_date(week_selection: str) -> date:
    """Return the Monday of the selected week.

    "current" maps to this week's Monday; any other value (the UI passes
    "last") maps to the previous week's Monday.

    Cleanup: removed a dead `import time` — the module was never used.
    """
    today = date.today()
    # weekday() is 0 for Monday, so this snaps back to the week start.
    current_monday = today - timedelta(days=today.weekday())
    return current_monday if week_selection == "current" else current_monday - timedelta(days=7)
| async def _format_home_dashboard(orch, language="en", week_selection="current"): | |
| """Helper to compute all strings for the home dashboard.""" | |
| import time | |
| start_time = time.time() | |
| import config | |
| t = UI_TEXT.get(language, UI_TEXT["en"]) | |
| # Fallback/Safety Return Tuple | |
| def _error_fallback(header, msg): | |
| return ( | |
| header, # 0: header | |
| "", # 1: evolution | |
| msg, # 2: brief | |
| "", # 3: focus | |
| gr.update(visible=False), # 5: snapshot | |
| "", # 6: structure | |
| "", # 7: goal_summary | |
| "", # 8: goal_status | |
| gr.update(visible=False), # 9: progress | |
| gr.update(visible=False), # 10: card_html | |
| None, # 11: card_state | |
| gr.update(visible=False), # 12: download | |
| gr.update(visible=False), # 14: next_run | |
| gr.update(visible=False), # 15: insight | |
| gr.update(visible=False), # 16: health | |
| gr.update(visible=False), # 17: trajectory | |
| gr.update(visible=False), # 18: evidence | |
| gr.update(visible=False), # 19: rec | |
| gr.update(visible=False), # 20: insight_grp | |
| gr.update(visible=False), # 21: evidence_grp | |
| gr.update(visible=False), # 22: rec_grp | |
| gr.update(visible=False), # 23: goal_grp | |
| gr.update(visible=False), # 24: struct_grp | |
| gr.update(visible=False), # 23: perf_card_grp | |
| gr.update(visible=False), # 24: next_run_hdr | |
| gr.update(visible=False), # 25: week_grp | |
| gr.update(visible=False), # 26: pace_plot | |
| gr.update(visible=False), # 27: hr_plot | |
| ) | |
| try: | |
| # Resolve target week | |
| target_monday = get_start_week_date(week_selection) | |
| # Week Range Header | |
| week_label_key = "current_week_label" if week_selection == "current" else "last_week_label" | |
| week_label = t.get(week_label_key, "Week") | |
| date_range_str = format_week_range(target_monday, language=language) | |
| week_header = f"### {week_label} ({date_range_str})" | |
| evolution_md = "" | |
| brief_content = "" | |
| focus_content = "" | |
| #snapshot_title = f"## {t.get('lbl_snapshot_title', 'Weekly Snapshot')}" | |
| snapshot_md = "" | |
| structure_md = "" | |
| goal_summary = "" | |
| goal_status_badge = "" | |
| goal_progress_val = 0 | |
| card_html = "" | |
| card_view_model = None | |
| snapshot = None | |
| rec_md = "" | |
| evidence_md = "" | |
| no_data_msg = t.get("no_data_available", "*No runs logged yet this week.*") | |
| context = await orch.get_or_build_intelligence_snapshot(target_monday, language=language) or {} | |
| # --- EARLY GUARD: Early Exit if no training data exists - first-time runners --- | |
| if not context: # Not persistance use-case first access | |
| logger.info(f"Home dashboard early exit (no data) render time: {time.time() - start_time:.2f}s") | |
| return ( | |
| week_header, # 0: home_week_range | |
| "", # 1: home_comparison_table | |
| t.get("welcome") if week_selection == "current" else no_data_msg, # 2: home_brief_content | |
| "", # 3: home_focus_content | |
| gr.update(visible=False), # 5: home_snapshot_metrics | |
| "", # 6: home_structure_metrics | |
| "", # 7: home_goal_summary | |
| "", # 8: home_goal_status | |
| gr.update(value=0, label=t["lbl_progress"]), # 9: home_goal_progress_bar | |
| gr.update(visible=False), # 10: home_card_html | |
| None, # 11: home_card_state | |
| gr.update(visible=False), # 12: btn_download_card | |
| render_next_run(None, language=language, visible=False), # 14: home_next_run_card | |
| gr.update(visible=False), # 15: home_coach_insight_md | |
| gr.update(visible=False), # 16: home_health_signal_md | |
| gr.update(visible=False), # 17: home_goal_trajectory_md | |
| gr.update(visible=False), # 18: home_evidence_md | |
| gr.update(visible=False), # 19: home_recommendation_md | |
| gr.update(visible=False), # 20: home_insights_group | |
| gr.update(visible=False), # 21: home_evidence_group | |
| gr.update(visible=False), # 22: home_rec_group | |
| gr.update(visible=False), # 23: home_goal_group | |
| gr.update(visible=False), # 24: home_structure_group | |
| gr.update(visible=False), # 25: performance_card_group | |
| gr.update(visible=False), # 26: home_next_run_header | |
| gr.update(visible=False), # 27: home_your_week_group | |
| gr.update(value=None), # 28: pace chart | |
| gr.update(value=None), # 29: hr chart | |
| ) | |
| # --- Recommendation from snapshot (product contract) --- | |
| recommendation = getattr(context, "recommendation", None) | |
| #obs_logger.log_event( | |
| # "debug", | |
| # f"Dashboard resolution for {target_monday}. Snapshot found: {bool(snapshot)}, History count: {len(latest_history)}", | |
| # event="dashboard_data_resolution", | |
| # component=obs_components.APPLICATION, | |
| # target_monday=str(target_monday), | |
| # from_repo=bool(orch.weekly_repo), | |
| # found=bool(snapshot), | |
| # history_count=len(latest_history), | |
| # session_keys=[str(k) for k in getattr(orch, "session_snapshots", {}).keys()] | |
| #) | |
| # Access Trend from context ONLY | |
| weekly_trend = (getattr(context, "weekly_trend", {}) or {}) | |
| comparison = weekly_trend.get("comparison_available", False) | |
| # Access Active Goal from context ONLY | |
| active_goal = (getattr(context, "active_goal", {}) or {}) | |
| # 3. Goal Logic - Use week-specific snapshot but ALWAYS resolve active goal | |
| # This ensures the target (e.g. 30km) is shown even if the week is empty (0% progress) | |
| goal_status_text = context.goal_status_text if context else "" | |
| if active_goal: | |
| gtype_key = f"goal_type_{active_goal['type']}" | |
| gtype_lbl = t.get(gtype_key, active_goal["type"].replace("_", " ").title()) | |
| goal_summary = f"**{gtype_lbl}:** {active_goal['target_value']} {active_goal['unit']}" | |
| status_key = active_goal["status"] | |
| status_lbl = t.get(f"goal_status_{status_key}", status_key) | |
| color = ( | |
| "green" | |
| if status_key == "on_track" | |
| else "orange" if status_key == "slightly_behind" else "red" | |
| ) | |
| goal_status_badge = f'<span style="color: {color}; font-weight: bold; font-size: 1.2rem;">{status_lbl}</span>' | |
| goal_progress_val = getattr(context, "goal_progress_pct", 0) | |
| else: | |
| # Home banner if no goal exists | |
| goal_summary = f"> [!IMPORTANT]\n> **{t['home_no_goal_banner']}**" | |
| goal_status_badge = "" | |
| goal_progress_val = 0 | |
| # 4. Content Blocks | |
| if context: | |
| # --- π COACH BRIEF SYNTHESIS --- | |
| brief_msg = "" | |
| focus_msg = "" | |
| # Prefer intelligence snapshot (product contract) | |
| brief_msg = getattr(context, "performance_brief", "") or "" | |
| focus_msg = getattr(context, "performance_focus", "") or "" | |
| focus_lbl = t.get("lbl_focus_inline") #, "**Focus:**") | |
| brief_content = "" | |
| if brief_msg: | |
| brief_content = f"{brief_msg}" | |
| if goal_status_text: | |
| brief_content = f"{brief_content}\n\n{goal_status_text}" if brief_content else goal_status_text | |
| if focus_msg: | |
| brief_content = f"{brief_content}\n\n{focus_lbl} {focus_msg}" if brief_content else f"{focus_lbl} {focus_msg}" | |
| focus_content = focus_msg | |
| evolution_md = context.performance_story if context.performance_story else "" | |
| # --- THIS WEEK (COMPACT SNAPSHOT) --- | |
| lbl_dist = t.get("lbl_distance", "Distance") | |
| lbl_runs = t.get("lbl_runs", "Runs") | |
| lbl_pace = t.get("lbl_avg_pace", "Avg Pace") | |
| lbl_cons = t.get("lbl_consistency", "Consistency") | |
| snapshot_md = f""" | |
| - **{lbl_dist}:** {context.weekly_distance_km:.1f} km | |
| - **{lbl_runs}:** {context.run_count} | |
| - **{lbl_pace}:** {format_pace(context.avg_pace)} | |
| - **{lbl_cons}:** {int(context.consistency_score)}% | |
| """.strip() | |
| structure_md = context.structure_view if context.structure_view else "" | |
| else: | |
| # No snapshot for this target week | |
| snapshot_md = no_data_msg | |
| structure_md = no_data_msg | |
| brief_content = no_data_msg | |
| focus_content = "" | |
| #evolution_md = no_data_msg | |
| # Final safety: if no goal data was found (e.g. no goal defined at all) | |
| if not goal_summary: | |
| goal_summary = no_data_msg | |
| # 5. Performance Card Generation | |
| if context.run_count >= 1: | |
| try: | |
| # STEP 1: UI must only call orchestrator. | |
| card_view_model = await orch.generate_performance_card( | |
| snapshot=context.weekly_snapshot, trend=weekly_trend, language=language | |
| ) | |
| # Generate HTML for card | |
| from _app.presentation.ui_text import get_text | |
| delta_label = get_text("delta_vs_4w", language) | |
| pace_label = get_text("lbl_avg_pace", language) | |
| hr_label = get_text("lbl_avg_hr", language) | |
| dist_label = get_text("lbl_total_distance", language) | |
| focus_lbl_static = get_text("focus_label", language) | |
| card_data = card_view_model.data | |
| insight = card_view_model.insight_paragraph | |
| focus = card_view_model.forward_focus | |
| hr_trend_val = f"{card_data.hr_delta:+.1f} bpm" if card_data.hr_delta is not None else "N/A" | |
| card_html = f""" | |
| <div class="performance-card"> | |
| <div class="metrics-grid"> | |
| <div class="metric-item"> | |
| <span class="m-val">{card_data.total_distance:.1f} km</span> | |
| <span class="m-lab">{dist_label}</span> | |
| </div> | |
| <div class="metric-item"> | |
| <span class="m-val">{card_data.delta_vs_avg:+.1f}%</span> | |
| <span class="m-lab">{delta_label}</span> | |
| </div> | |
| <div class="metric-item"> | |
| <span class="m-val">{card_data.pace_delta:+.1f} s/km</span> | |
| <span class="m-lab">{pace_label} Ξ</span> | |
| </div> | |
| <div class="metric-item"> | |
| <span class="m-val">{hr_trend_val}</span> | |
| <span class="m-lab">{hr_label} Ξ</span> | |
| </div> | |
| </div> | |
| <div class="divider"></div> | |
| <div class="insight-section"> | |
| <p>{insight}</p> | |
| <div class="forward-focus"> | |
| <strong>{focus_lbl_static}:</strong> {focus} | |
| </div> | |
| </div> | |
| </div> | |
| """.strip() | |
| except Exception as e: | |
| logger.error(f"Error building card data: {e}", exc_info=True) | |
| # --- POSITIONING FROM INTELLIGENCE SNAPSHOT --- | |
| positioning = (getattr(context, "positioning_view", {}) or {}) # single source of truth | |
| # Check if we have a valid snapshot with runs | |
| run_count = getattr(context, "run_count", 0) | |
| has_insight = run_count >= 3 | |
| has_goal = True if active_goal else False | |
| # Format Evidence Strip | |
| evidence_md = context.evidence_view if context.evidence_view else "" | |
| # --- Extract insights | |
| insights = getattr(context, "insights", {}) | |
| if isinstance(insights, dict) and insights: | |
| lines = [] | |
| # Primary lever | |
| pl = insights.get("primary_lever") | |
| if pl and isinstance(pl, dict): | |
| msg = pl.get("message") | |
| if msg: | |
| lines.append(f"β’ {msg}") | |
| # Risk signal | |
| risk = insights.get("risk_signal") | |
| if risk and isinstance(risk, dict): | |
| msg = risk.get("message") | |
| if msg: | |
| lines.append(f"β’ β οΈ {msg}") | |
| # Key observations | |
| obs = insights.get("key_observations", []) | |
| if isinstance(obs, list): | |
| for o in obs: | |
| msg = o.get("message") if isinstance(o, dict) else None | |
| if msg: | |
| lines.append(f"β’ {msg}") | |
| insight_md = "\n".join(lines) | |
| elif isinstance(insights, list): | |
| insight_md = "\n".join([f"β’ {item}" for item in insights]) | |
| elif insights: | |
| insight_md = str(insights) | |
| # Removed legacy visibility checks - using snapshot-first logic | |
| has_runs = bool(context and getattr(context, "run_count", 0) >= 1) | |
| has_insight = bool(context and getattr(context, "run_count", 0) >= 3) | |
| logger.info(f"Home dashboard render time: {time.time() - start_time:.2f}s") | |
| return ( | |
| week_header, # 0: home_week_range | |
| evolution_md, # 1: home_comparison_table | |
| brief_content if run_count else no_data_msg, # 2: home_brief_content | |
| focus_content, # 3: home_focus_content | |
| snapshot_md, # 5: home_snapshot_metrics | |
| structure_md, # 6: home_structure_metrics | |
| goal_summary, # 7: home_goal_summary | |
| goal_status_badge, # 8: home_goal_status | |
| gr.update(value=goal_progress_val, label=t["lbl_progress"]), # 9: home_goal_progress_bar | |
| gr.update(value=card_html, visible=bool(card_html)), # 10: home_card_html | |
| card_view_model, # 11: home_card_state | |
| gr.update(value=t["btn_download_card"], visible=bool(card_view_model)), # 12: btn_download_card | |
| render_next_run(recommendation, language=language, visible=has_runs), # 14: home_next_run_card | |
| gr.update(value=positioning.get("insight", ""), visible=has_insight), # 15: home_coach_insight_md | |
| gr.update(value=positioning.get("health_signal", ""), visible=has_insight), # 16: home_health_signal_md | |
| gr.update(value=positioning.get("goal_trajectory", ""), visible=has_insight), # 17: home_goal_trajectory_md | |
| gr.update(value=evidence_md, visible=True if evidence_md else False), # 18: home_evidence_md | |
| gr.update(value="", visible=False), # 19: home_recommendation_md (no rec_md in new logic) | |
| gr.update(visible=has_insight), # 20: home_insights_group | |
| gr.update(visible=has_runs if evidence_md else False), # 21: home_evidence_group | |
| gr.update(visible=False), # 22: home_rec_group | |
| gr.update(visible=has_goal if goal_summary else False), # 23: home_goal_group | |
| gr.update(visible=has_runs), # 24: home_structure_group | |
| gr.update(visible=has_insight), # 23: performance_card_group | |
| gr.update(visible=has_runs), # 24: home_next_run_header | |
| gr.update(visible=has_runs), # 25: your_week_group | |
| gr.update(value=(getattr(context, "charts", {}) or {}).get("pace_chart"), visible=bool((getattr(context, "charts", {}) or {}).get("pace_chart"))), # 26: pace_plot | |
| gr.update(value=(getattr(context, "charts", {}) or {}).get("hr_chart"), visible=bool((getattr(context, "charts", {}) or {}).get("hr_chart"))), # 27: hr_plot | |
| ) | |
| except Exception as e: | |
| logger.error(f"Error formatting home dashboard: {e}", exc_info=True) | |
| error_msg = f"β οΈ **Error updating dashboard**: {str(e)}" | |
| return _error_fallback(week_header, error_msg) | |
| def _resolve_health_signal(value, trend): | |
| # Harmonization safeguard | |
| if value == "OVERREACHING" and trend and abs(trend.hr_delta or 0) < 3: | |
| value = "STABLE" | |
| if not value or value == "UNKNOWN" or value in ("OPTIMAL", "RECOVERING"): | |
| value = "STABLE" | |
| icon_map = { | |
| "STABLE": "π’", | |
| "OVERREACHING": "π ", | |
| "STRAIN": "π΄", | |
| } | |
| return value, icon_map.get(value, "π’") | |
| def _resolve_position_status(value, trajectory): | |
| # Map domain enums to presentation enums | |
| mapping = { | |
| "AHEAD": "ADVANCING", | |
| "ON_TRACK": "STABLE", | |
| "FALLING_BEHIND": "DRIFTING", | |
| } | |
| value = mapping.get(value, value) | |
| if not value or value == "UNKNOWN": | |
| if trajectory == "IMPROVING": | |
| value = "ADVANCING" | |
| else: | |
| value = "STABLE" | |
| icon_map = { | |
| "ADVANCING": "π", | |
| "STABLE": "β", | |
| "DRIFTING": "π", | |
| } | |
| return value, icon_map.get(value, "β") | |
| def _resolve_goal_trajectory(value): | |
| # Map domain enums to presentation enums | |
| if value == "STABLE": | |
| value = "MAINTAINING" | |
| if not value or value == "UNKNOWN": | |
| value = "MAINTAINING" | |
| icon_map = { | |
| "IMPROVING": "π―", | |
| "MAINTAINING": "π―", | |
| "DECLINING": "β οΈ", | |
| } | |
| return value, icon_map.get(value, "π―") | |
| def _resolve_focus(value): | |
| # Map domain enums to presentation enums | |
| mapping = { | |
| "CONSISTENCY": "BUILD", | |
| "INTENSITY": "BUILD", | |
| "MAINTENANCE": "MAINTENANCE", | |
| "RECOVERY": "RECOVERY", | |
| } | |
| value = mapping.get(value, value) | |
| if not value or value == "UNKNOWN": | |
| value = "MAINTENANCE" | |
| icon_map = { | |
| "RECOVERY": "π", | |
| "BUILD": "π", | |
| "MAINTENANCE": "π‘", | |
| "REDUCE_LOAD": "β¬οΈ", | |
| } | |
| return value, icon_map.get(value, "π‘") | |
async def _format_positioning_dashboard(orch, language="en", week_selection="current"):
    """Helper to compute markdown blocks for the dedicated Positioning tab.

    Args:
        orch: Orchestrator exposing ``get_or_build_intelligence_snapshot``.
        language: UI language key into ``UI_TEXT`` (falls back to "en").
        week_selection: Week selector understood by ``get_start_week_date``
            (e.g. "current").

    Returns:
        A 16-tuple of markdown strings in UI binding order:
        (header, coach_insight, state, health_signal, goal_trajectory,
         training_phase, forward_focus, trajectory, insight, evidence,
         recommendation, safety_line, plan_hero, adjustments, notes,
         plan_body).
    """
    t = UI_TEXT.get(language, UI_TEXT["en"])
    no_data_msg = t.get("positioning_no_data", "*No training data available yet.*")
    header_md = f"### {t['runner_positioning_title']}"
    state_md_val = no_data_msg
    coach_insight = ""
    health_signal = ""
    goal_trajectory = ""
    phase_display = ""
    forward_focus = ""
    trajectory_md_val = ""
    insight_md_val = ""
    evidence_md_val = ""
    rec_md = ""
    # BUGFIX: the five plan fields below were previously bound only inside
    # the `if plan_data:` branch, so any snapshot without a structured plan
    # raised NameError at the final return. Default them to empty strings.
    safety_line = ""
    plan_hero_value = ""
    adjustments = ""
    notes = ""
    plan_body_text = ""
    # Resolve target week
    target_monday = get_start_week_date(week_selection)
    context = await orch.get_or_build_intelligence_snapshot(target_monday, language=language)
    if not context:
        # No intelligence snapshot for this week: return the empty layout.
        return (
            f"### {t['runner_positioning_title']}",
            "",  # coach_insight
            t["positioning_insufficient_data"],  # state_md (fallback)
            "",  # health_signal
            "",  # goal_trajectory
            "",  # training_phase
            "",  # forward_focus
            "",  # trajectory
            "",  # insight
            "",  # evidence
            "",  # recommendation
            "",  # safety_line
            "",  # plan_hero
            "",  # adjustments
            "",  # notes
            "",  # plan_body
        )
    # 1. Extract view from snapshot (Product Contract).
    # Guard against a None/missing positioning_view (mirrors the defensive
    # access used by the home dashboard).
    positioning = getattr(context, "positioning_view", {}) or {}
    view = {
        "headline": positioning.get("headline", ""),
        "state": positioning.get("state", ""),
        "health_signal": positioning.get("health_signal", ""),
        "goal_trajectory": positioning.get("goal_trajectory", ""),
        "forward_focus": positioning.get("forward_focus", ""),
        "trajectory": positioning.get("positioning_change", ""),
        "insight": positioning.get("insight", ""),
        "training_phase": positioning.get("training_phase", ""),
        "evidence": positioning.get("evidence", {}),
    }
    recommendation = getattr(context, "recommendation", None)
    if recommendation:
        rec_md = f"""
**{t.get('next_run.focus', 'Focus')}:** {recommendation.get('focus', "")} <br>
**{t.get('next_run.session', 'Suggested Session')}:** {recommendation.get('session_type', "")} <br>
**{t.get('next_run.why', 'Why')}:** {recommendation.get('description', "")}
""".strip()
    # 2. Evidence Rendering (Simplified): one bullet per evidence entry
    # that carries a message.
    evidence = view["evidence"]
    if isinstance(evidence, dict):
        evidence_md_val = "\n".join(
            f"β’ {v.get('message')}"
            for v in evidence.values()
            if isinstance(v, dict) and v.get("message")
        )
    else:
        evidence_md_val = ""
    # 3. Headers and Displays
    # Use snapshot date if available, else fall back to today.
    snapshot_date = getattr(context, "week_start", date.today())
    start = snapshot_date.strftime("%d %b")
    end = (snapshot_date + timedelta(days=6)).strftime("%d %b")
    header_md = f"### {start} - {end}"
    coach_insight = view["headline"]
    state_md_val = view["state"]
    health_signal = view["health_signal"]
    goal_trajectory = view["goal_trajectory"]
    forward_focus = view["forward_focus"]
    trajectory_md_val = view["trajectory"]
    insight_md_val = view["insight"]
    # Phase rendering
    phase_icons = {
        "base": "π",
        "build": "ποΈ",
        "peak": "π",
        "recovery": "π§",
        "plateau": "βͺ",
    }
    phase_key = f"positioning_phase_{view['training_phase']}"
    # Consistency fix: use `t` (language-safe lookup resolved above) instead
    # of UI_TEXT[language], which raised KeyError for unknown language codes.
    phase_name = t.get(phase_key, str(view["training_phase"]).capitalize())
    phase_display = f"{phase_icons.get(view['training_phase'], 'β')} {phase_name}"
    # 4. Extract Weekly Plan (the structured plan is optional; defaults above
    # keep the return tuple valid when it is absent).
    plan_data = _extract_plan_json(context.plan)
    if plan_data:
        plan_data["orch"] = orch
        (
            safety_line,
            plan_hero_value,
            adjustments,
            notes,
            plan_body_text,
            _week_summary,  # unused: the Positioning tab has no summary slot
        ) = format_plan_structured(plan_data, language=language)
    return (
        header_md,
        coach_insight,
        state_md_val,
        health_signal,
        goal_trajectory,
        phase_display,
        forward_focus,
        trajectory_md_val,
        insight_md_val,
        evidence_md_val,
        rec_md,
        safety_line,
        plan_hero_value,
        adjustments,
        notes,
        plan_body_text,
    )
async def _process_runs(runs: List[Dict], torch, language="en", progress=None):
    """Process uploaded run files and rebuild the full dashboard UI state.

    Runs the orchestrator over the uploaded files, then recomputes the
    Analysis tab, Home dashboard and Positioning tab in one pass.

    NOTE(review): ``torch`` is the orchestrator instance (not the PyTorch
    package) β the name appears to predate this module's conventions;
    renaming would change the keyword-call interface, so it is kept.

    Args:
        runs: Parsed upload payloads (list of dicts) handed to
            ``process_runs_with_orchestrator``.
        torch: Orchestrator instance; also threaded back into the returned
            UI state so subsequent events reuse it.
        language: UI language key into ``UI_TEXT`` (falls back to "en").
        progress: Optional Gradio progress callback ``(fraction, desc=...)``;
            a no-op lambda is substituted when absent.

    Returns:
        A 65-item tuple of Gradio updates/values in the standardized order:
        [analysis (18)] + [home (28)] + [positioning (16)] +
        [download (1)] + [tabs (2)].
    """
    # Substitute a no-op so later progress(...) calls are unconditional.
    if progress is None:
        progress = lambda x, desc="": None
    t = UI_TEXT.get(language, UI_TEXT["en"])
    # Initialize orchestrator if needed
    #if orch is None:
    #    progress(0, desc=t.get("progress_initializing", "Initializing Agentic Orchestrator..."))
    #    torch = await create_orchestrator()
    #else:
    #    torch = orch
    # Guard: nothing uploaded -> return the empty layout with an error banner.
    if not runs:
        progress(0, desc=t.get("progress_checking", "Checking files..."))
        empty = list(await _empty_results(torch, {}, selected="id_analyse", language=language))
        # Update the reassurance message with the error
        empty[0] = gr.update(value=t["error_no_runs"], visible=True)
        return tuple(empty)
    progress(0.2, desc=t["progress_extracting"])
    res = await process_runs_with_orchestrator(runs, torch, language=language)
    progress(0.8, desc=t["progress_generating"])
    # Orchestrator-reported failure: surface the error in the empty layout.
    if isinstance(res, dict) and res.get("error"):
        empty = list(await _empty_results(torch, {}, selected="id_analyse", language=language))
        fail_msg = t["insights_analysis_failed"]
        lever_heading = t["insights_primary_lever_heading"]
        # Update specific fields for the error state
        # (indices follow the _empty_results layout: 0=reassurance,
        #  1=lever box, 10=risk level, 13=orchestrator state, 14=charts state)
        empty[0] = gr.update(value=f"β Error: {res['error']}", visible=True)
        empty[1] = gr.update(value=f"### {lever_heading}\n{fail_msg}")
        empty[10] = "Error"
        empty[13] = torch
        empty[14] = {}
        return tuple(empty)
    # Reassurance mapping: "Parsed N run(s) across M week(s)"
    parsed_label = t.get("lbl_parsed", "Parsed")
    runs_label = t.get("unit_runs", "run (s)")
    weeks_label = t.get("unit_weeks", "week (s)")
    across_label = t.get("lbl_across", "across")
    reassurance = f"β **{parsed_label}:** {res['num_runs']} {runs_label}"
    if res["weeks"] > 0:
        reassurance += f" {across_label} {res['weeks']} {weeks_label}"
    # Insights mapping
    lever_heading = t["insights_primary_lever_heading"]
    lever_content = f"{reassurance}\n\n### {lever_heading}\n{res['primary_lever']}"
    details_content = res["details_insights"]
    # Plan mapping: the plan may arrive as a structured dict (with "weeks"),
    # a dict wrapping text, or plain text containing embedded JSON.
    plan_obj = res["plan"]
    plan_md = ""
    structured_plan = None
    if isinstance(plan_obj, dict):
        candidate = plan_obj.get("plan") if "plan" in plan_obj else plan_obj
        if isinstance(candidate, dict) and "weeks" in candidate:
            structured_plan = candidate
        else:
            plan_md = plan_obj.get("plan") or plan_obj.get("text") or str(plan_obj)
    else:
        plan_md = str(plan_obj) if plan_obj is not None else ""
        structured_plan = _extract_plan_json(plan_md)
    focus_lbl = t["plan_focus_label"]
    focus_content = f"**{focus_lbl}** {res['primary_lever']}"
    if structured_plan:
        (
            safety_line,
            plan_hero_value,
            adjustments,
            notes,
            plan_body_text,
            _week_summary,
        ) = format_plan_structured(structured_plan, language=language)
        if not plan_hero_value:
            # Fall back to the primary-lever focus line as the hero text.
            plan_hero_value = focus_content
    else:
        # Unstructured plan text: split into sections heuristically.
        safety_line, adjustments, notes, plan_body_text, week_summary = _split_plan_sections(
            plan_md
        )
        if week_summary:
            summary_lbl = t["plan_week_summary_label"]
            plan_hero_value = f"**{summary_lbl}** {week_summary}"
        else:
            plan_hero_value = focus_content
    # Charts
    charts_res = res.get("charts", {})
    pace_fig = charts_res.get("pace_chart")
    hr_fig = charts_res.get("hr_chart")
    # Risk
    risk_data = res.get("risk_assessment")
    raw_risk = (risk_data["risk_level"] if risk_data else "LOW").upper()
    risk_level = t.get(f"risk_level_{raw_risk.lower()}", raw_risk)
    plan_fallback = t["plan_pending"]
    # With >= 8 weeks of history, land the user on Home; otherwise Intelligence.
    target_tab = "id_home" if res.get("weeks", 0) >= 8 else "id_intelligence"
    # Dashboard updates
    home_updates = list(await _format_home_dashboard(torch, language=language))
    # Ensure home dashboard portion doesn't overwrite full charts with weekly ones
    # Indices 26 and 27 of home_updates correspond to pace_plot and hr_plot
    home_updates[26] = gr.update(value=pace_fig, visible=bool(pace_fig))
    home_updates[27] = gr.update(value=hr_fig, visible=bool(hr_fig))
    pos_updates = await _format_positioning_dashboard(torch, language=language)
    # 7. Final Return standardized order (65 items for full Dashboard refresh)
    # Order: [analysis_res (18)] + [home (28)] + [pos (16)] + [download (1)] + [tabs (2)]
    analysis_ui_updates = (
        gr.update(visible=False),  # 0: reassurance_msg
        gr.update(value=lever_content),  # 1: lever_box
        details_content,  # 2: details_box
        gr.update(value=safety_line, visible=bool(safety_line)),  # 3: safety_banner
        gr.update(value=plan_hero_value, visible=bool(plan_hero_value)),  # 4: plan_hero
        gr.update(value=adjustments, visible=bool(adjustments)),  # 5: adjustments_box
        gr.update(value=notes, visible=bool(notes)),  # 6: notes_box
        gr.update(value=plan_body_text or plan_fallback),  # 7: plan_body
        gr.update(value=pace_fig, visible=bool(pace_fig)),  # 8: pace_plot
        gr.update(value=hr_fig, visible=bool(hr_fig)),  # 9: hr_plot
        risk_level,  # 10: risk_level_box
        gr.update(interactive=True),  # 11: msg
        gr.update(value=t["chat_indicator_yes"], visible=True),  # 12: context_indicator
        torch,  # 13: orchestrator_state
        {"pace": pace_fig, "hr": hr_fig},  # 14: charts_state (actually should be charts_state_upd)
        gr.update(selected=target_tab),  # 15: tabs
        target_tab,  # 16: selected_tab
        gr.update(None),  # 17: home_week_toggle (reset)
    )
    # home_updates[10] is the card_view_model (CardViewModel)
    card_view_model = home_updates[10]
    return (
        *analysis_ui_updates,  # 18 items
        *home_updates,  # 28 items
        *pos_updates,  # 16 items
        gr.update(value=None, visible=False),  # 63: home_card_download_file (placeholder)
        gr.update(visible=True),  # 64: tab_home
        gr.update(visible=True),  # 65: tab_intelligence
    )
| def build_interface(): #progress=gr.Progress() | |
| import time | |
| start_time = time.time() | |
| # Light theme + CSS polish (UI-only; no backend changes) | |
| CUSTOM_CSS = """ | |
| :root { | |
| --bg: #ffffff; | |
| --panel: #ffffff; | |
| --panel2: #f9fafb; | |
| --border: rgba(0,0,0,0.06); | |
| --border-strong: rgba(0,0,0,0.1); | |
| --text: #111827; | |
| --muted: #6b7280; | |
| --shadow: 0 6px 20px rgba(0,0,0,0.06); | |
| --shadow-sm: 0 2px 10px rgba(0,0,0,0.05); | |
| --upload-tint: rgba(59, 130, 246, 0.06); | |
| --insights-tint: rgba(168, 85, 247, 0.06); | |
| --plan-tint: rgba(34, 197, 94, 0.06); | |
| --chat-tint: rgba(0, 0, 0, 0.02); | |
| --card-bg-insights: rgba(255, 255, 255, 0.65); | |
| --warning-bg: rgba(245, 158, 11, 0.18); | |
| --warning-text: #b45309; | |
| --link: #2563eb; | |
| } | |
| /* Support for both system media query and Gradio's .dark class */ | |
| @media (prefers-color-scheme: dark) { | |
| :root { | |
| --bg: #0b0f19; | |
| --panel: #161b22; | |
| --panel2: #1c2128; | |
| --border: rgba(255,255,255,0.1); | |
| --border-strong: rgba(255,255,255,0.2); | |
| --text: #f0f6fc; | |
| --muted: #8b949e; | |
| --shadow: 0 6px 20px rgba(0,0,0,0.3); | |
| --shadow-sm: 0 2px 10px rgba(0,0,0,0.2); | |
| --upload-tint: rgba(59, 130, 246, 0.12); | |
| --insights-tint: rgba(168, 85, 247, 0.12); | |
| --plan-tint: rgba(34, 197, 94, 0.12); | |
| --chat-tint: rgba(255, 255, 255, 0.04); | |
| --card-bg-insights: rgba(30, 35, 45, 0.85); | |
| --warning-bg: rgba(245, 158, 11, 0.25); | |
| --warning-text: #fbbf24; | |
| --link: #58a6ff; | |
| } | |
| } | |
| .dark { | |
| --bg: #0b0f19; | |
| --panel: #161b22; | |
| --panel2: #1c2128; | |
| --border: rgba(255,255,255,0.1); | |
| --border-strong: rgba(255,255,255,0.2); | |
| --text: #f0f6fc; | |
| --muted: #8b949e; | |
| --shadow: 0 6px 20px rgba(0,0,0,0.3); | |
| --shadow-sm: 0 2px 10px rgba(0,0,0,0.2); | |
| --upload-tint: rgba(59, 130, 246, 0.12); | |
| --insights-tint: rgba(168, 85, 247, 0.12); | |
| --plan-tint: rgba(34, 197, 94, 0.12); | |
| --chat-tint: rgba(255, 255, 255, 0.04); | |
| --card-bg-insights: rgba(30, 35, 45, 0.85); | |
| --warning-bg: rgba(245, 158, 11, 0.25); | |
| --warning-text: #fbbf24; | |
| --link: #58a6ff; | |
| } | |
| .gradio-container { | |
| max-width: 1100px !important; | |
| background: var(--bg) !important; | |
| color: var(--text) !important; | |
| } | |
| /* Prose/Markdown targeting for legibility */ | |
| .prose, .prose * { color: var(--text) !important; } | |
| .prose p, .prose li, .prose span { color: var(--text) !important; } | |
| .prose strong { color: var(--text) !important; font-weight: 700; } | |
| .prose h1, .prose h2, .prose h3, .prose h4 { color: var(--text) !important; margin-top: 1rem; } | |
| .prose a { color: var(--link) !important; text-decoration: underline; } | |
| /* Card-like sections */ | |
| .section-card { | |
| border-radius: 16px; | |
| padding: 16px; | |
| border: 1px solid var(--border); | |
| box-shadow: var(--shadow); | |
| margin-bottom: 14px; | |
| background: var(--panel) !important; | |
| } | |
| /* Subtle section tints */ | |
| .upload-card { background: var(--upload-tint) !important; } | |
| .insights-card { background: var(--insights-tint) !important; } | |
| .plan-card { background: var(--plan-tint) !important; } | |
| .chat-card { background: var(--chat-tint) !important; } | |
| /* Home tab specificity */ | |
| .evolution-card, .snapshot-card { | |
| background: var(--panel2) !important; | |
| box-shadow: none !important; | |
| opacity: 0.85; | |
| } | |
| .evolution-card h2, .snapshot-card h2 { | |
| font-size: 1.1rem !important; | |
| opacity: 0.8; | |
| } | |
| .checkin-card { | |
| border-left: 4px solid var(--border-strong); | |
| padding-left: 20px; | |
| margin-bottom: 24px; | |
| background: var(--panel) !important; | |
| } | |
| .focus-highlight p { | |
| font-size: 1.15rem !important; | |
| font-weight: 600 !important; | |
| line-height: 1.4; | |
| color: var(--text) !important; | |
| } | |
| /* Helper text */ | |
| .muted, .muted * { color: var(--muted) !important; font-size: 0.95rem; } | |
| /* Metric refinement for Structure block */ | |
| .metric-row { | |
| margin-bottom: 4px; | |
| line-height: 1.4; | |
| } | |
| .metric-label { | |
| font-weight: 600; | |
| color: var(--text); | |
| opacity: 0.85; | |
| } | |
| .metric-value { | |
| font-weight: 400; | |
| color: var(--text); | |
| } | |
| .subtext { | |
| font-size: 1rem; | |
| color: var(--text); | |
| display: inline; | |
| margin-left: 2px; | |
| } | |
| .coaching-tip { | |
| margin-top: 12px; | |
| padding-top: 8px; | |
| border-top: 1px dashed var(--border); | |
| font-style: italic; | |
| color: var(--muted); | |
| font-size: 0.95rem; | |
| } | |
| /* Primary components */ | |
| .primary-lever, .plan-hero { | |
| border-radius: 14px; | |
| padding: 12px 14px; | |
| border: 1px solid var(--border-strong); | |
| background: var(--card-bg-insights) !important; | |
| box-shadow: var(--shadow-sm); | |
| } | |
| .primary-lever p, .plan-hero p { font-size: 1.05rem; line-height: 1.35; color: var(--text) !important; } | |
| .warning-banner { | |
| border-radius: 14px; | |
| padding: 12px 14px; | |
| border: 1px solid var(--border-strong); | |
| background: var(--warning-bg) !important; | |
| color: var(--warning-text) !important; | |
| font-weight: 700; | |
| } | |
| .details-box, .plan-body { | |
| border-radius: 12px; | |
| padding: 10px 12px; | |
| border: 1px solid var(--border); | |
| background: var(--card-bg-insights) !important; | |
| } | |
| .plan-body h3 { | |
| margin: 12px 0 6px; | |
| font-size: 1.05rem; | |
| color: var(--text) !important; | |
| } | |
| /* File Uploader Fix */ | |
| .file-uploader { | |
| background: transparent !important; | |
| border: 1px solid var(--border) !important; | |
| } | |
| .file-uploader .upload-container { | |
| background: var(--panel2) !important; | |
| border: 2px dashed var(--border-strong) !important; | |
| } | |
| .dark .file-uploader .upload-container { | |
| background: rgba(255,255,255,0.03) !important; | |
| } | |
| .file-uploader * { color: var(--text) !important; } | |
| /* Starter prompt buttons as pills */ | |
| .starter-row button { | |
| border-radius: 999px !important; | |
| font-size: 0.9rem !important; | |
| padding: 8px 12px !important; | |
| white-space: nowrap; | |
| background: var(--panel2) !important; | |
| border: 1px solid var(--border) !important; | |
| color: var(--text) !important; | |
| } | |
| /* Primary buttons */ | |
| .gr-button-primary, button.primary { | |
| border-radius: 12px !important; | |
| font-weight: 700 !important; | |
| } | |
| /* Chat area */ | |
| .chat-card { | |
| border-radius: 16px; | |
| padding: 12px; | |
| border: 1px solid var(--border); | |
| } | |
| /* Risk badge styling */ | |
| .risk-badge { | |
| border-radius: 999px; | |
| padding: 6px 10px; | |
| border: 1px solid var(--border-strong); | |
| background: var(--card-bg-insights) !important; | |
| display: inline-block; | |
| } | |
| .risk-badge .wrap { | |
| justify-content: center; | |
| } | |
| .risk-badge * { color: var(--text) !important; } | |
| /* Tabs as pills */ | |
| .gradio-container .tabs { | |
| gap: 8px; | |
| border-bottom: none !important; | |
| } | |
| .gradio-container .tabs button { | |
| border-radius: 999px !important; | |
| font-weight: 700 !important; | |
| padding: 8px 14px !important; | |
| border: 1px solid var(--border) !important; | |
| background: var(--panel2) !important; | |
| box-shadow: var(--shadow-sm) !important; | |
| color: var(--text) !important; | |
| } | |
| .gradio-container .tabs button[aria-selected="true"] { | |
| background: var(--border-strong) !important; | |
| border-color: var(--border-strong) !important; | |
| } | |
| /* Performance Card Styling */ | |
| .performance-card { | |
| background: var(--panel) !important; | |
| border-radius: 20px; | |
| padding: 24px; | |
| border: 1px solid var(--border-strong); | |
| box-shadow: var(--shadow); | |
| margin-bottom: 24px; | |
| } | |
| .performance-card .card-header { | |
| display: flex; | |
| align-items: center; | |
| gap: 12px; | |
| margin-bottom: 20px; | |
| } | |
| .performance-card .indicator { | |
| font-size: 2.2rem; | |
| } | |
| .performance-card .title { | |
| font-size: 1.4rem; | |
| font-weight: 800; | |
| color: var(--text); | |
| } | |
| .performance-card .metrics-grid { | |
| display: grid; | |
| grid-template-columns: repeat(4, 1fr); | |
| gap: 16px; | |
| margin-bottom: 20px; | |
| } | |
| .performance-card .metric-item { | |
| display: flex; | |
| flex-direction: column; | |
| align-items: flex-start; | |
| } | |
| .performance-card .m-val { | |
| font-size: 1.25rem; | |
| font-weight: 700; | |
| color: var(--text); | |
| } | |
| .performance-card .m-lab { | |
| font-size: 0.85rem; | |
| color: var(--muted); | |
| text-transform: uppercase; | |
| letter-spacing: 0.5px; | |
| } | |
| .performance-card .divider { | |
| height: 1px; | |
| background: var(--border); | |
| margin: 16px 0; | |
| } | |
| .performance-card .insight-section p { | |
| font-size: 1.05rem; | |
| line-height: 1.5; | |
| color: var(--text); | |
| margin-bottom: 12px; | |
| } | |
| .performance-card .forward-focus { | |
| background: var(--panel2); | |
| padding: 12px 16px; | |
| border-radius: 12px; | |
| border-left: 4px solid var(--link); | |
| font-size: 1rem; | |
| } | |
| .progress-panel { | |
| font-size: 15px; | |
| opacity: 0.95; | |
| border-radius: 12px; | |
| padding: 12px 16px; | |
| background: var(--upload-tint) !important; | |
| border: 1px solid var(--border-strong) !important; | |
| margin-bottom: 12px; | |
| animation: pulse 2s infinite ease-in-out; | |
| } | |
| @keyframes pulse { | |
| 0% { opacity: 0.8; } | |
| 50% { opacity: 1; } | |
| 100% { opacity: 0.8; } | |
| } | |
| """ | |
| with gr.Blocks( | |
| title="Runner Agentic Intelligence", | |
| theme=gr.themes.Soft(), | |
| css=CUSTOM_CSS, | |
| ) as demo: | |
| import config | |
| # 1. Header Section | |
| with gr.Row(): | |
| with gr.Column(): | |
| title_md = gr.Markdown(UI_TEXT["en"]["title"]) | |
| subtitle_md = gr.Markdown(UI_TEXT["en"]["subtitle"]) | |
| header_key = "evolution_workflow" if config.is_storage_enabled() else "workflow" | |
| workflow_md = gr.Markdown(UI_TEXT["en"][header_key]) | |
| with gr.Column(scale=0, min_width=150): | |
| language_selector = gr.Dropdown( | |
| choices=[("English", "en"), ("PortuguΓͺs (Brasil)", "pt-BR")], | |
| value="en", | |
| label="Language / Idioma", | |
| interactive=True, | |
| ) | |
| # 2. Public preview notice β shown above tabs | |
| is_preview = config.is_hf_space() | |
| persistence_enabled = config.is_persistence_enabled() | |
| storage_key = ( | |
| "banner_persistence_enabled" if persistence_enabled else "banner_persistence_disabled" | |
| ) | |
| with gr.Accordion(UI_TEXT["en"]["banner_title"], open=is_preview) as banner_acc: | |
| banner_md = gr.Markdown( | |
| f"- {UI_TEXT['en']['banner_session']}\n" | |
| f"- {UI_TEXT['en'][storage_key]}\n" | |
| f"- {UI_TEXT['en']['banner_medical']}" | |
| #f"- {UI_TEXT['en']['banner_full']}" | |
| ) | |
| # State to hold the orchestrator instance | |
| orchestrator_state = gr.State(None) | |
| # State to hold the latest charts | |
| charts_state = gr.State({}) | |
| # State to hold selected tab | |
| selected_tab = gr.State("id_analyse") | |
| progress_box = gr.Markdown(visible=False, elem_classes=["progress-panel"]) | |
| with gr.Tabs(selected="id_analyse") as tabs: | |
| with gr.Tab(label="π " + UI_TEXT["en"]["tab_home"], id="id_home", visible=False) as tab_home: | |
| with gr.Row(): | |
| home_week_range = gr.Markdown("### " + UI_TEXT["en"]["lbl_home_week_range"], elem_id="home_week_range") | |
| home_week_toggle = gr.Radio( | |
| choices=[ | |
| (UI_TEXT["en"]["current_week_label"], "current"), | |
| (UI_TEXT["en"]["last_week_label"], "last"), | |
| ], | |
| value="current", | |
| label=None, | |
| show_label=False, | |
| container=False, | |
| scale=0, | |
| ) | |
| # --- 1. Coach Brief --- | |
| home_brief_header = gr.Accordion("π " + UI_TEXT["en"]["lbl_coach_brief"], open=True) | |
| with home_brief_header: | |
| home_brief_content = gr.Markdown("") | |
| # --- 2. Next Recommended Run --- | |
| home_next_run_header = gr.Accordion("π " + UI_TEXT["en"]["next_run.title"], open=True) | |
| with home_next_run_header: | |
| home_next_run_card = gr.Markdown(visible=False) | |
| # --- 3. Weekly Snapshot (Your Week) --- | |
| your_week_group = gr.Accordion("ποΈ " + UI_TEXT["en"]["lbl_your_week"], open=True) | |
| with your_week_group: | |
| with gr.Row(): | |
| with gr.Column(scale=2, visible=True): | |
                        # Runner Story narrative
| #home_evolution_md = gr.Markdown(UI_TEXT["en"]["sec_evolution"]) | |
| home_comparison_table = gr.Markdown("") | |
| with gr.Column(scale=1, visible=True): | |
| # Weekly Snapshot metrics | |
| #home_snapshot_md = gr.Markdown(f"## {UI_TEXT['en']['lbl_snapshot_title']}") | |
| home_snapshot_metrics = gr.Markdown("") | |
| # --- 4. Training Recommendation --- | |
| home_rec_group = gr.Accordion(UI_TEXT["en"]["rec_title"], open=False) | |
| with home_rec_group: | |
| home_recommendation_md = gr.Markdown("") | |
| # --- 5. Insights & Trends --- | |
| home_insights_group = gr.Accordion("π§ " + UI_TEXT["en"]["lbl_key_insight"], open=False, visible=False) | |
| #home_insights_header = gr.Markdown("## π§ " + UI_TEXT["en"]["lbl_key_insight"]) | |
| with home_insights_group: | |
| with gr.Row(): | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| home_coach_insight_md = gr.Markdown("") | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| home_health_signal_md = gr.Markdown("") | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| home_goal_trajectory_md = gr.Markdown("") | |
| # --- 6. Evidence Breakdown --- | |
| home_evidence_group = gr.Accordion("π§ " + UI_TEXT["en"]["runner_positioning_title"], open=False, visible=False) | |
| with home_evidence_group: | |
| #home_evidence_header = gr.Markdown("## π§ " + UI_TEXT["en"]["runner_positioning_title"]) | |
| with gr.Group(elem_classes=["section-card"]): | |
| home_evidence_md = gr.Markdown("") | |
| # --- 7. Performance Card (v1) --- | |
| performance_card_group = gr.Accordion("π " + UI_TEXT["en"]["performance_first_week_title"], open=False, visible=False, elem_id="performance_card_container") | |
| with performance_card_group: | |
| home_card_html = gr.HTML("", label=None) | |
| btn_download_card = gr.Button( | |
| UI_TEXT["en"]["btn_download_card"], | |
| variant="secondary", | |
| visible=False, | |
| size="sm" | |
| ) | |
| home_card_state = gr.State(None) | |
| home_card_download_file = gr.File(label=None, visible=False) | |
| # --- 8. Goal Progress --- | |
| home_goal_group = gr.Accordion("π― " + UI_TEXT["en"]["sec_goal"], open=False, visible=False, elem_classes=["section-card", "goal-card"]) | |
| with home_goal_group: | |
| #home_goal_title = gr.Markdown(UI_TEXT["en"]["sec_goal"]) | |
| with gr.Row(): | |
| home_goal_summary = gr.Markdown("") | |
| home_goal_status = gr.Markdown("") | |
| home_goal_progress_bar = gr.Slider( | |
| label=UI_TEXT["en"]["lbl_progress"], | |
| interactive=False, | |
| minimum=0, | |
| maximum=100, | |
| ) | |
| # --- 9. Training Structure --- | |
| home_structure_group = gr.Accordion("π§± " + UI_TEXT["en"]["sec_structure"], open=False, visible=False, elem_id="structure_card_container") | |
| with home_structure_group: | |
| #home_structure_md = gr.Markdown("## π§± " + UI_TEXT["en"]["sec_structure"]) | |
| home_structure_metrics = gr.Markdown("") | |
| # Hidden/Unused in new layout but kept for variable binding | |
| home_focus_content = gr.Markdown("", visible=False) | |
| with gr.Tab(label="π‘ " + UI_TEXT["en"]["tab_intelligence"], id="id_intelligence", visible=False) as tab_intelligence: | |
| current_state_lbl = gr.Accordion(UI_TEXT["en"]["lbl_current_state"], open=True, elem_classes=["section-card"]) | |
| with current_state_lbl: | |
| with gr.Row(): | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| positioning_header = gr.Markdown("") | |
| state_md = gr.Markdown("") | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| health_signal_lbl = gr.Markdown("### " + UI_TEXT["en"]["lbl_health_signal"]) | |
| health_signal_md = gr.Markdown("") | |
| with gr.Column(): | |
| with gr.Group(elem_classes=["section-card"]): | |
| goal_trajectory_lbl = gr.Markdown("### " + UI_TEXT["en"]["lbl_goal_trajectory"]) | |
| goal_trajectory_md = gr.Markdown("") | |
| risk_level_box = gr.Markdown("", visible=False) | |
| key_insight_lbl = gr.Accordion("π§ " + UI_TEXT["en"]["lbl_key_insight"], open=True) | |
| with key_insight_lbl: | |
| coach_insight_md = gr.Markdown("") # Resolves positioning_outputs[1] | |
| lever_box = gr.Markdown("") # Resolves analysis_results_outputs[1] | |
| details_box = gr.Markdown("") # Resolves analysis_results_outputs[2] | |
| insight_md = gr.Markdown("") # Resolves positioning_outputs[8] | |
| forward_focus_lbl = gr.Accordion("π― " + UI_TEXT["en"]["lbl_forward_focus"], open=False) | |
| with forward_focus_lbl: | |
| with gr.Group(elem_classes=["section-card"]): | |
| forward_focus_md = gr.Markdown("") | |
| recommendation_md = gr.Markdown("") | |
| sec_plan_md = gr.Accordion("π " + UI_TEXT["en"]["sec_plan"], open=False) | |
| with sec_plan_md: | |
| safety_banner = gr.Markdown(visible=False) | |
| plan_hero = gr.Markdown("") | |
| plan_body = gr.Markdown("") | |
| with gr.Accordion("π― " + UI_TEXT["en"]["acc_adjustments"], open=False) as adjustments_acc: | |
| adjustments_box = gr.Markdown("") | |
| notes_box = gr.Markdown("") | |
| sec_charts_md = gr.Accordion("π " + UI_TEXT["en"]["sec_charts"], open=False) | |
| with sec_charts_md: | |
| with gr.Row(): | |
| pace_plot = gr.Image(label=UI_TEXT["en"]["lbl_pace_trend"], interactive=False) | |
| hr_plot = gr.Image(label=UI_TEXT["en"]["lbl_hr_trend"], interactive=False) | |
| with gr.Accordion("π " + UI_TEXT["en"]["lbl_details"], open=False) as details_acc: | |
| with gr.Row(): | |
| with gr.Column(): | |
| training_phase_md = gr.Markdown("", visible=False) | |
| trajectory_md = gr.Markdown("", visible=False) | |
| with gr.Column(): | |
| evidence_md = gr.Markdown("", visible=False) | |
| with gr.Tab(label="π " + UI_TEXT["en"]["tab_analyse"], id="id_analyse") as tab_analyse: | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| sec_upload_md = gr.Accordion(UI_TEXT["en"]["sec_upload"], elem_classes=["section-card", "upload-card"], open=False) | |
| with sec_upload_md: | |
| upload_hints_md = gr.Markdown(UI_TEXT["en"]["upload_hints"]) | |
| reassurance_msg = gr.Markdown(visible=False) | |
| with gr.Row(): | |
| with gr.Column(scale=2): | |
| upload = gr.File( | |
| label=UI_TEXT["en"]["upload_label"], | |
| file_count="multiple", | |
| type="filepath", | |
| elem_classes=["file-uploader"], | |
| ) | |
| with gr.Column(scale=1): | |
| folder_input = gr.Textbox( | |
| label=UI_TEXT["en"]["lbl_local_folder_path"], | |
| visible=False, | |
| ) | |
| with gr.Row(): | |
| run_btn = gr.Button( | |
| UI_TEXT["en"]["btn_analyse"], | |
| variant="primary", | |
| interactive=False, | |
| ) | |
| reset_btn = gr.Button( | |
| UI_TEXT["en"]["btn_reset"], variant="secondary", | |
| visible=True if config.is_hf_space() else False, | |
| ) | |
| run_folder_btn = gr.Button(UI_TEXT["en"]["btn_analyse_folder"], visible=False) | |
| # Re-integrating Chatbot here at the bottom of Runs for better context | |
| chat_header = gr.Accordion("ππΎββοΈ " + UI_TEXT["en"]["sec_chat"], elem_classes=["section-card", "chat-panel"], open=False) | |
| with chat_header: | |
| context_indicator = gr.Markdown(UI_TEXT["en"]["chat_indicator_no"], visible=True) | |
| chatbot = gr.Chatbot(height=400, type="messages") | |
| with gr.Row(): | |
| msg = gr.Textbox(placeholder=UI_TEXT["en"]["chat_placeholder"], scale=4) | |
| submit_btn = gr.Button(UI_TEXT["en"]["btn_send"], scale=1) | |
| with gr.Row(): | |
| starter_limiter = gr.Button(UI_TEXT["en"]["btn_starter_limiter"], size="sm") | |
| starter_why = gr.Button(UI_TEXT["en"]["btn_starter_why"], size="sm") | |
| starter_fatigue = gr.Button(UI_TEXT["en"]["btn_starter_fatigue"], size="sm") | |
| clear = gr.Button(UI_TEXT["en"]["btn_clear"], size="sm") | |
| # Templates (collapsed) | |
| with gr.Accordion(UI_TEXT["en"]["acc_knowledge_base"], open=False) as knowledge_acc: | |
| from prompts.store import get_template_store | |
| store = get_template_store() | |
| categories = sorted(list(set([t.category for t in store.list_templates()]))) | |
| with gr.Row(): | |
| cat_drop = gr.Dropdown(choices=categories, label=UI_TEXT["en"]["lbl_category"]) | |
| template_drop = gr.Dropdown(choices=[], label=UI_TEXT["en"]["lbl_template"]) | |
| template_preview = gr.Textbox(label=UI_TEXT["en"]["lbl_preview"], interactive=False, lines=4) | |
| use_btn = gr.Button(UI_TEXT["en"]["btn_insert_message"]) | |
            # NOTE: Tab layout follows the requested four tabs (Home,
            # Intelligence, Runs, Profile). The coach chat lives at the
            # bottom of the Runs tab as a "Consult Coach" section rather
            # than in its own tab.
| with gr.Tab(label="π€ " + UI_TEXT["en"]["tab_profile"], id="id_profile", visible=False) as tab_profile: | |
| with gr.Group(elem_classes=["section-card"]): | |
| sec_profile = gr.Markdown(UI_TEXT["en"]["sec_profile"]) | |
| with gr.Row(): | |
| prof_display_name = gr.Textbox(label=UI_TEXT["en"]["lbl_display_name"]) | |
| prof_age = gr.Number(label=UI_TEXT["en"]["lbl_age"], precision=0) | |
| with gr.Row(): | |
| prof_experience = gr.Dropdown( | |
| choices=["beginner", "intermediate", "advanced"], | |
| label=UI_TEXT["en"]["lbl_experience"], | |
| ) | |
| prof_baseline = gr.Number(label=UI_TEXT["en"]["lbl_baseline"], minimum=0) | |
| prof_gender = gr.Dropdown( | |
| choices=[ | |
| (UI_TEXT["en"]["lbl_gender_male"], "male"), | |
| (UI_TEXT["en"]["lbl_gender_female"], "female"), | |
| (UI_TEXT["en"]["lbl_gender_pns"], "prefer_not_to_say"), | |
| (UI_TEXT["en"]["lbl_gender_none"], None), | |
| ], | |
| label=UI_TEXT["en"]["lbl_gender"], | |
| ) | |
| prof_notes = gr.TextArea(label=UI_TEXT["en"]["lbl_injury_notes"]) | |
| prof_save_btn = gr.Button(UI_TEXT["en"]["btn_save_profile"], variant="primary") | |
| prof_status = gr.Markdown("") | |
| with gr.Group(elem_classes=["section-card"]): | |
| sec_goal_prof = gr.Markdown(UI_TEXT["en"]["sec_goal"]) | |
| with gr.Row(): | |
| goal_type = gr.Dropdown( | |
| choices=[ | |
| (UI_TEXT["en"]["goal_type_race"], "race"), | |
| (UI_TEXT["en"]["goal_type_volume"], "volume"), | |
| (UI_TEXT["en"]["goal_type_pace"], "pace"), | |
| ], | |
| label=UI_TEXT["en"]["lbl_goal_type"], | |
| ) | |
| goal_target = gr.Number(label=UI_TEXT["en"]["lbl_target"]) | |
| goal_unit = gr.Textbox(label=UI_TEXT["en"]["lbl_unit"]) | |
| goal_date = gr.Textbox(label=UI_TEXT["en"]["lbl_date"], placeholder="YYYY-MM-DD") | |
| goal_save_btn = gr.Button(UI_TEXT["en"]["btn_save_goal"], variant="primary") | |
| goal_status = gr.Markdown("") | |
| # Chat logic was moved to Runs tab | |
| async def user_chat(message, history, orch, charts, language="en"): | |
| import tempfile | |
| if not message.strip(): | |
| return "", history, orch, charts | |
| # Ensure orchestrator is initialized | |
| if orch is None: | |
| orch = await create_orchestrator() | |
| # Use specific language for chat | |
| response_dict = await orch.chat(message, language=language) | |
| history.append({"role": "user", "content": message}) | |
| if "response" in response_dict: | |
| response = response_dict["response"] | |
| history.append({"role": "assistant", "content": response}) | |
| if "chart" in response_dict: | |
| fig = response_dict["chart"] | |
| import tempfile | |
| with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: | |
| # Handle both fresh Matplotlib figures and decoded PIL Images from persistence | |
| if hasattr(fig, "savefig"): | |
| fig.savefig(tmp.name) | |
| elif hasattr(fig, "save"): | |
| fig.save(tmp.name) | |
| else: | |
| logger.error(f"Unsupported chart type for chat: {type(fig)}") | |
| tmp_path = None | |
| if tmp.name: | |
| tmp_path = tmp.name | |
| history.append( | |
| { | |
| "role": "assistant", | |
| "content": {"path": tmp_path, "alt_text": "Generated Chart"}, | |
| } | |
| ) | |
| return "", history, orch, charts | |
| # Event helpers | |
| def _toggle_analyse_button(files): | |
| return gr.update(interactive=bool(files), variant="secondary") | |
| # Starter Question Buttons Logic | |
| def get_starter_q(lang, key): | |
| return UI_TEXT.get(lang, UI_TEXT["en"]).get(key, "") | |
| # Prompt Templates Section Logic | |
| def update_templates(cat): | |
| templates = store.list_by_category(cat or "") | |
| return gr.Dropdown(choices=[t.label for t in templates], value=None) | |
| def preview_template(cat, label): | |
| templates = store.list_by_category(cat or "") | |
| match = next((t for t in templates if t.label == label), None) | |
| if match: | |
| return match.templateBody | |
| return "" | |
| async def wrap_set_goal(orch, gtype, tval, tunit, tdate, lang, week_selection): | |
| status, _ = await set_runner_goal(orch, gtype, tval, tunit, tdate) | |
| # Fetch latest data to update UI components | |
| home_updates = await _format_home_dashboard(orch, language=lang, week_selection=week_selection) | |
| return ( | |
| status, | |
| *home_updates | |
| ) | |
| analysis_results_outputs = [ | |
| reassurance_msg, | |
| lever_box, | |
| details_box, | |
| safety_banner, | |
| plan_hero, | |
| adjustments_box, | |
| notes_box, | |
| plan_body, | |
| pace_plot, | |
| hr_plot, | |
| risk_level_box, | |
| msg, | |
| context_indicator, | |
| orchestrator_state, | |
| charts_state, | |
| tabs, | |
| selected_tab, | |
| home_week_toggle, | |
| ] | |
| home_outputs = [ | |
| home_week_range, | |
| home_comparison_table, | |
| home_brief_content, | |
| home_focus_content, | |
| #home_snapshot_md, | |
| home_snapshot_metrics, | |
| home_structure_metrics, | |
| home_goal_summary, | |
| home_goal_status, | |
| home_goal_progress_bar, | |
| home_card_html, | |
| home_card_state, | |
| btn_download_card, | |
| #home_evolution_md, | |
| home_next_run_card, | |
| home_coach_insight_md, | |
| home_health_signal_md, | |
| home_goal_trajectory_md, | |
| home_evidence_md, | |
| home_recommendation_md, | |
| home_insights_group, # 18 | |
| home_evidence_group, # 19 | |
| home_rec_group, # 20 | |
| home_goal_group, # 21 | |
| home_structure_group, # 22 | |
| performance_card_group, # 23 | |
| home_next_run_header, # 24 | |
| your_week_group, # 25 | |
| pace_plot, # 26 | |
| hr_plot, # 27 | |
| ] | |
| positioning_outputs = [ | |
| positioning_header, | |
| coach_insight_md, | |
| state_md, | |
| health_signal_md, | |
| goal_trajectory_md, | |
| training_phase_md, | |
| forward_focus_md, | |
| trajectory_md, | |
| insight_md, | |
| evidence_md, | |
| recommendation_md, | |
| safety_banner, | |
| plan_hero, | |
| adjustments_box, | |
| notes_box, | |
| plan_body, | |
| #pace_plot, | |
| #hr_plot, | |
| ] | |
| all_localized_outputs = [ | |
| title_md, subtitle_md, workflow_md, | |
| tab_analyse, tab_intelligence, tab_profile, tab_home, | |
| sec_upload_md, upload_hints_md, upload, run_btn, reset_btn, folder_input, run_folder_btn, | |
| risk_level_box, lever_box, details_box, | |
| sec_plan_md, sec_charts_md, adjustments_acc, pace_plot, hr_plot, plan_body, | |
| home_brief_header, chatbot, msg, submit_btn, clear, | |
| starter_limiter, starter_why, starter_fatigue, context_indicator, | |
| knowledge_acc, cat_drop, template_drop, template_preview, use_btn, | |
| details_acc, banner_acc, banner_md, | |
| sec_profile, prof_display_name, prof_age, prof_experience, prof_notes, prof_save_btn, prof_baseline, prof_gender, | |
| sec_goal_prof, goal_type, goal_target, goal_unit, goal_date, goal_save_btn, | |
| current_state_lbl, health_signal_lbl, goal_trajectory_lbl, key_insight_lbl, forward_focus_lbl, | |
| your_week_group, home_week_toggle, | |
| home_structure_group, #home_structure_md, | |
| home_goal_group, #home_goal_title, | |
| home_rec_group, #home_rec_header, | |
| home_evidence_group, #home_evidence_header, | |
| home_insights_group, #home_insights_header, | |
| home_next_run_header, | |
| performance_card_group, | |
| chat_header, | |
| *home_outputs, | |
| *positioning_outputs | |
| ] | |
| async def initialize_app(lang="en", week_selection="current"): | |
| import time | |
| import config | |
| from datetime import datetime | |
| start_time = time.time() | |
| orch = await create_orchestrator() | |
| profile = await get_runner_profile(orch) #if config.is_persistence_enabled() else None | |
| has_ctx = has_analysis_context(orch) #Set to False if Empty analysis exists? | |
| t = UI_TEXT.get(lang, UI_TEXT["en"]) | |
| indicator_val = t["chat_indicator_no"] | |
| indicator_visible = not has_ctx | |
| if orch.analysis_repo: | |
| try: | |
| last = orch.analysis_repo.get_last_analysis() | |
| if has_ctx: | |
| ts = last.created_at.strftime("%Y-%m-%d %H:%M") | |
| indicator_val = f"{t.get('insights_timestamp_label', 'Analysis from')} {ts}" | |
| indicator_visible = True | |
| #has_ctx = True # Correct: Mark context as available | |
| except Exception: | |
| pass | |
| if not profile: | |
| profile_updates = [gr.update() for _ in range(6)] | |
| else: | |
| profile_updates = [ | |
| profile.get("runner_display_name"), | |
| profile.get("age"), | |
| profile.get("experience_level"), | |
| profile.get("injury_history_notes"), | |
| profile.get("baseline_weekly_km"), | |
| profile.get("gender"), | |
| ] | |
| # Goal Data initialization | |
| goal_form_updates = [gr.update() for _ in range(4)] | |
| import config | |
| if orch.dto_response.active_goal: | |
| goal_form_updates = [ | |
| orch.dto_response.active_goal.get("type"), | |
| orch.dto_response.active_goal.get("target_value"), | |
| orch.dto_response.active_goal.get("unit"), | |
| datetime.fromisoformat(orch.dto_response.active_goal.get("target_date")).strftime("%Y-%m-%d") if orch.dto_response.active_goal.get("target_date") else "", | |
| ] | |
| # Use helper for home dashboard | |
| home_updates = await _format_home_dashboard(orch, language=lang, week_selection=week_selection) | |
| # home_updates is (week_header, evolution_md, brief_content, focus_content, snapshot_md, structure_md, goal_summary, goal_status_badge, goal_progress_val, card_html, card_view_model, download_btn_update) | |
| # Positioning Dashboard Logic | |
| pos_updates = await _format_positioning_dashboard(orch, language=lang, week_selection=week_selection) | |
| tab_selection = "id_home" | |
| logger.info(f"App initialization render time: {time.time() - start_time:.2f}s") | |
| return ( | |
| orch, | |
| *profile_updates, | |
| gr.update(value=indicator_val, visible=indicator_visible), | |
| #tabs, | |
| gr.update(selected=tab_selection), # selected_tab | |
| *home_updates, | |
| gr.update( | |
| choices=[(t["current_week_label"], "current"), (t["last_week_label"], "last")], | |
| value=week_selection, | |
| ), # home_week_toggle | |
| *goal_form_updates, | |
| gr.update(visible=True if has_ctx or config.is_persistence_enabled() else False), | |
| gr.update(visible=True if has_ctx else False), | |
| gr.update(visible=True if profile else False), | |
| *pos_updates, | |
| ) | |
| # Event helpers | |
| async def _process_uploads_with_progress(files, orch, lang, week_selection): | |
| if not files: | |
| empty = await _empty_results(orch, {}, language=lang) | |
| # _empty_results (62) + download (1) + tabs (2) = 65 | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False), *(gr.update() for _ in range(2))) | |
| return | |
| lang = lang or "en" | |
| # Stage 1: Uploading | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_1', lang)}", visible=True), *([gr.update() for _ in range(65)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 2: Snapshot Building | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_2', lang)}"), *([gr.update() for _ in range(65)])) | |
| await asyncio.sleep(0.4) | |
| try: | |
| runs = load_runs_from_uploaded_files(files) | |
| if not runs: | |
| empty = await _empty_results(orch, {}, language=lang) | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False), *(gr.update() for _ in range(2))) | |
| return | |
| # Stage 3: Analyzing Trends | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_3', lang)}"), *([gr.update() for _ in range(65)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 4: Coach Insights | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_4', lang)}"), *([gr.update() for _ in range(65)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 5: Recommendations | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_5', lang)}"), *([gr.update() for _ in range(65)])) | |
| # Real work happens here | |
| res = await _process_runs(runs, orch, language=lang) | |
| # Final updates and hide progress panel | |
| # res (65 items) already contains analysis(18) + home(28) + pos(16) + download(1) + tabs(2) | |
| yield (gr.update(visible=False), *res) | |
| except Exception as e: | |
| import logging | |
| logging.getLogger(__name__).error(f"Process uploads failed: {e}", exc_info=True) | |
| empty = await _empty_results(orch if 'torch' in locals() else orch, {}, language=lang) | |
| # Show error in reassurance message area | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False), *(gr.update() for _ in range(2))) | |
| async def _process_folder_with_progress(folder_path, orch, lang, week_selection): | |
| if not folder_path: | |
| empty = await _empty_results(orch, {}, language=lang) | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False)) | |
| return | |
| lang = lang or "en" | |
| # Stage 1: Reading folder | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_1', lang)}", visible=True), *([gr.update() for _ in range(63)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 2: Snapshot Building | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_2', lang)}"), *([gr.update() for _ in range(63)])) | |
| await asyncio.sleep(0.4) | |
| try: | |
| from ingestion.loader import load_runs_from_folder | |
| runs = load_runs_from_folder(folder_path) | |
| if not runs: | |
| empty = await _empty_results(orch, {}, language=lang) | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False)) | |
| return | |
| # Stage 3: Analyzing Trends | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_3', lang)}"), *([gr.update() for _ in range(63)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 4: Coach Insights | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_4', lang)}"), *([gr.update() for _ in range(63)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 5: Recommendations | |
| yield (gr.update(value=f"### {get_text('pipeline_stage_5', lang)}"), *([gr.update() for _ in range(63)])) | |
| res = await _process_runs(runs, orch, language=lang) | |
| # target: progress (1) + analysis (18) + home (28) + positioning (16) + download (1) = 64 | |
| yield (gr.update(visible=False), *res[:63]) | |
| except Exception as e: | |
| import logging | |
| logging.getLogger(__name__).error(f"Process folder failed: {e}", exc_info=True) | |
| empty = await _empty_results(orch, {}, language=lang) | |
| yield (gr.update(visible=False), *empty, gr.update(value=None, visible=False)) | |
    async def initialize_app_with_progress(lang):
        """Run initialize_app while streaming staged progress messages.

        Staged messages are only shown when persistence is enabled (a cold
        start then loads stored history); otherwise initialization is fast
        enough that no progress panel is needed. Each staged yield pads the
        remaining 61 demo.load outputs with no-op gr.update()s.
        """
        import config
        if config.is_persistence_enabled():
            # Stage 1: Services
            yield (gr.update(value=f"### {get_text('init_stage_1', lang)}", visible=True), *([gr.update() for _ in range(61)]))
            await asyncio.sleep(0.3)
            # Stage 2: Profile
            yield (gr.update(value=f"### {get_text('init_stage_2', lang)}"), *([gr.update() for _ in range(61)]))
            await asyncio.sleep(0.3)
            # Stage 3: History
            yield (gr.update(value=f"### {get_text('init_stage_3', lang)}"), *([gr.update() for _ in range(61)]))
            await asyncio.sleep(0.3)
            # Stage 4: Dashboard
            yield (gr.update(value=f"### {get_text('init_stage_4', lang)}"), *([gr.update() for _ in range(61)]))
        res = await initialize_app(lang)
        # 1 (progress) + 62 (res) = 63 outputs expected by demo.load
        yield (gr.update(visible=False), *res)
| async def change_week_with_progress(orch, week_selection, lang): | |
| # Stage 1: Switching | |
| yield (gr.update(value=f"### {get_text('week_stage_1', lang)}", visible=True), *([gr.update() for _ in range(28)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 2: Intelligence | |
| yield (gr.update(value=f"### {get_text('week_stage_2', lang)}"), *([gr.update() for _ in range(28)])) | |
| await asyncio.sleep(0.4) | |
| # Stage 3: Dashboard | |
| yield (gr.update(value=f"### {get_text('week_stage_3', lang)}"), *([gr.update() for _ in range(28)])) | |
| res = await change_week(orch, week_selection, lang) | |
| # 1 (progress) + 28 (res) = 29 | |
| yield (gr.update(visible=False), *res) | |
| demo.load( | |
| initialize_app_with_progress, | |
| inputs=[language_selector], | |
| outputs=[ | |
| progress_box, | |
| orchestrator_state, | |
| prof_display_name, | |
| prof_age, | |
| prof_experience, | |
| prof_notes, | |
| prof_baseline, | |
| prof_gender, | |
| context_indicator, | |
| #tabs, | |
| tabs, | |
| *home_outputs, | |
| home_week_toggle, | |
| goal_type, | |
| goal_target, | |
| goal_unit, | |
| goal_date, | |
| tab_home, | |
| tab_intelligence, | |
| tab_profile, | |
| *positioning_outputs, | |
| ], | |
| show_progress="hidden" | |
| ) | |
| # Analysis Event Bindings | |
| upload.change(_toggle_analyse_button, inputs=[upload], outputs=[run_btn], show_progress="hidden") | |
| run_btn.click( | |
| _process_uploads_with_progress, | |
| inputs=[upload, orchestrator_state, language_selector, home_week_toggle], | |
| outputs=[ | |
| progress_box, | |
| *analysis_results_outputs, | |
| *home_outputs, | |
| *positioning_outputs, | |
| home_card_download_file, | |
| tab_home, | |
| tab_intelligence | |
| ], | |
| show_progress="hidden", | |
| ) | |
| run_folder_btn.click( | |
| _process_folder_with_progress, | |
| inputs=[folder_input, orchestrator_state, language_selector, home_week_toggle], | |
| outputs=[ | |
| progress_box, | |
| *analysis_results_outputs, | |
| *home_outputs, | |
| *positioning_outputs, | |
| home_card_download_file, | |
| ], | |
| show_progress="hidden", | |
| ) | |
| reset_btn.click( | |
| reset_ui, | |
| inputs=[orchestrator_state, language_selector], | |
| outputs=[ | |
| upload, | |
| run_btn, | |
| *analysis_results_outputs, | |
| *home_outputs, | |
| *positioning_outputs, | |
| home_card_download_file, | |
| tab_home, | |
| tab_intelligence, | |
| tab_analyse, | |
| chatbot, | |
| ], | |
| show_progress="hidden", | |
| ) | |
# Chat Event Bindings
# Enter-submit and the Send button both route through user_chat; the
# language selector is passed so replies match the active UI language.
msg.submit(
    user_chat,
    [msg, chatbot, orchestrator_state, charts_state, language_selector],
    [msg, chatbot, orchestrator_state, charts_state],
    show_progress="hidden",
)
submit_btn.click(
    user_chat,
    [msg, chatbot, orchestrator_state, charts_state, language_selector],
    [msg, chatbot, orchestrator_state, charts_state],
    show_progress="hidden",
)
# Clear chat history; queue=False so the reset happens immediately.
clear.click(lambda: [], None, chatbot, queue=False)
# Starter Question Buttons Bindings
# Each starter button inserts a localized canned question (looked up by key
# via get_starter_q) into the chat input box.
starter_limiter.click(
    get_starter_q,
    inputs=[language_selector, gr.State("chat_starter_limiter")],
    outputs=[msg],
    show_progress="hidden",
)
starter_why.click(
    get_starter_q, inputs=[language_selector, gr.State("chat_starter_why")], outputs=[msg], show_progress="hidden"
)
starter_fatigue.click(
    get_starter_q,
    inputs=[language_selector, gr.State("chat_starter_fatigue")],
    outputs=[msg],
    show_progress="hidden",
)
# Template Bindings
# Category selection repopulates the template dropdown; picking a template
# renders a preview, and the "use" button copies that preview into the input.
cat_drop.change(update_templates, inputs=[cat_drop], outputs=[template_drop], show_progress="hidden")
template_drop.change(
    preview_template, inputs=[cat_drop, template_drop], outputs=[template_preview], show_progress="hidden"
)
use_btn.click(lambda x: x, inputs=[template_preview], outputs=[msg], show_progress="hidden")
# Download the performance card currently held in home_card_state.
btn_download_card.click(
    download_performance_card,
    inputs=[home_card_state],
    outputs=[home_card_download_file],
    show_progress="hidden",
)
# Saving a goal writes a status message and refreshes the Home dashboard
# outputs (goal widgets live there).
goal_save_btn.click(
    wrap_set_goal,
    inputs=[
        orchestrator_state,
        goal_type,
        goal_target,
        goal_unit,
        goal_date,
        language_selector,
        home_week_toggle,
    ],
    outputs=[goal_status, *home_outputs],
    show_progress="hidden",
)
# Persist runner profile fields; the second output is a throwaway gr.State()
# placeholder — presumably to match the handler's return arity; confirm.
prof_save_btn.click(
    save_runner_profile,
    inputs=[
        orchestrator_state,
        prof_display_name,
        prof_age,
        prof_experience,
        prof_notes,
        prof_baseline,
        prof_gender,
    ],
    outputs=[prof_status, gr.State()],
    show_progress="hidden",
)
| # --- Language Switch Logic --- | |
async def update_language(lang, orch, week_selection="current"):
    """Re-localize the whole UI after a language switch.

    Rebuilds every static label from UI_TEXT plus the dynamic Home and
    Positioning dashboards. Returns one update per entry in
    ``all_localized_outputs`` — ordering is positional and must stay in
    sync with that list.

    Args:
        lang: Selected language code; falls back to ``"en"``.
        orch: Orchestrator from session state; may be None before the first
            analysis, in which case only static labels are localized.
        week_selection: ``"current"`` or ``"last"`` — which week the Home
            tab is showing.
    """
    t = UI_TEXT.get(lang, UI_TEXT["en"])
    # Dynamic insight fragments; remain empty when no snapshot is available.
    lever_content = ""
    details_content = ""
    if orch:
        try:
            # Side effect only: ensures the selected week's brief exists in
            # `lang` so the dashboard helpers below render translated text.
            # (Previously the returned (brief, focus) pair was bound but
            # never used — dropped.)
            await _ensure_home_brief_lang(orch, week_selection, lang)
        except Exception as e:
            logger.error(f"Error ensuring home brief language: {e}")
    # Fallback for dynamic lists to avoid index shifts in Gradio outputs
    home_vals_list = [gr.update() for _ in range(28)]
    # Label fix for week range selector
    home_vals_list[0] = gr.update(label=t["lbl_home_week_range"])
    if orch:
        try:
            # Use helper for home dashboard
            h_res = await _format_home_dashboard(
                orch, language=lang, week_selection=week_selection
            )
            if h_res and len(h_res) == 28:  # Needs to be increased if more widgets are added
                home_vals_list = list(h_res)
                # Preserve label for home_week_range (index 0)
                val0 = h_res[0].get('value') if isinstance(h_res[0], dict) else getattr(h_res[0], 'value', '')
                home_vals_list[0] = gr.update(value=val0, label=t["lbl_home_week_range"])
        except Exception as e:
            logger.error(f"Error updating home dashboard language: {e}")
    pos_vals_list = [gr.update() for _ in range(16)]
    if orch:
        try:
            p_res = await _format_positioning_dashboard(orch, language=lang)
            if p_res and len(p_res) == 16:
                pos_vals_list = list(p_res)
        except Exception as e:
            logger.error(f"Error updating positioning dashboard language: {e}")
    storage_enabled = config.is_storage_enabled()
    storage_key = (
        "banner_persistence_enabled" if storage_enabled else "banner_persistence_disabled"
    )
    indicator_visible = orch is not None and getattr(orch, "latest_summary", None) is not None
    indicator_val = t["chat_context_indicator"] if indicator_visible else ""
    if orch:
        # Bug fix: this snapshot lookup previously ran unconditionally and
        # raised AttributeError when no orchestrator was in session yet.
        week_date = get_start_week_date(week_selection)
        context = await orch.get_or_build_intelligence_snapshot(week_date, lang)
        if context:
            # Reassurance mapping
            parsed_label = t.get("lbl_parsed", "Parsed")
            runs_label = t.get("unit_runs", "run(s)")
            weeks_label = t.get("unit_weeks", "week(s)")
            across_label = t.get("lbl_across", "across")
            reassurance = f"β **{parsed_label}:** {context.run_count} {runs_label}"
            if len(orch.session_snapshots) > 0:
                reassurance += f" {across_label} {len(orch.session_snapshots)} {weeks_label}"
            # Insights mapping
            lever_heading = t["insights_primary_lever_heading"]
            lever_content = f"{reassurance}\n\n### {lever_heading}\n{context.insights['primary_lever']['message']}"
            details_content = context.insights['primary_lever']['constraint']
    return [
        gr.update(label=t["title"]),  # title_md
        gr.update(value=t["subtitle"]),  # subtitle_md
        gr.update(value=t["evolution_workflow"] if storage_enabled else t["workflow"]),  # workflow_md
        gr.update(label="π " + t["tab_analyse"]),  # tab_analyse
        gr.update(label="π‘ " + t["tab_intelligence"]),  # tab_intelligence
        gr.update(label="π€ " + t["tab_profile"]),  # tab_profile
        gr.update(label="π " + t["tab_home"]),  # tab_home
        gr.update(label=t["sec_upload"]),  # sec_upload_md
        gr.update(value=t["upload_hints"]),  # upload_hints_md
        gr.update(label=t["upload_label"], value=None),  # upload
        gr.update(value=t["btn_analyse"]),  # run_btn
        gr.update(value=t["btn_reset"]),  # reset_btn
        gr.update(label=t["lbl_local_folder_path"]),  # folder_input
        gr.update(value=t["btn_analyse_folder"]),  # run_folder_btn
        gr.update(label=t["lbl_risk"]),  # risk_level_box
        gr.update(label=t["lbl_lever"], value=lever_content or ""),  # lever_box
        gr.update(value=f"### {t['insights_risk_signal_heading']} \n {details_content or ''}"),  # details_box
        gr.update(label="ποΈ " + t["sec_plan"]),  # sec_plan_md
        gr.update(label="π " + t["sec_charts"]),  # sec_charts_md
        gr.update(label=t["acc_adjustments"]),  # adjustments_acc
        gr.update(label=t["lbl_pace_trend"]),  # pace_plot
        gr.update(label=t["lbl_hr_trend"]),  # hr_plot
        gr.update(value=t["plan_pending"]),  # plan_body
        gr.update(label="π " + t["lbl_coach_brief"]),  # home_brief_header
        gr.update(label=t["tab_coach"]),  # chatbot
        gr.update(label=t["chat_placeholder"], placeholder=t["chat_placeholder"]),  # msg
        gr.update(value=t["btn_send"]),  # submit_btn
        gr.update(value=t["btn_clear"]),  # clear
        gr.update(value=t["starter_limiter"]),  # starter_limiter
        gr.update(value=t["starter_why"]),  # starter_why
        gr.update(value=t["starter_fatigue"]),  # starter_fatigue
        gr.update(value=indicator_val, visible=indicator_visible),  # context_indicator
        gr.update(label=t["acc_knowledge_base"]),  # knowledge_acc
        gr.update(label=t["lbl_category"]),  # cat_drop
        gr.update(label=t["lbl_template"]),  # template_drop
        gr.update(label=t["lbl_preview"]),  # template_preview
        gr.update(value=t["btn_insert_message"]),  # use_btn
        gr.update(label=t["lbl_details"]),  # details_acc
        gr.update(label=t["banner_title"]),  # banner_acc
        gr.update(value=f"- {t['banner_session']}\n- {t[storage_key]}\n- {t['banner_medical']}\n- {t['banner_full']}"),  # banner_md
        gr.update(value=t["sec_profile"]),  # sec_profile
        gr.update(label=t["lbl_display_name"]),  # prof_display_name
        gr.update(label=t["lbl_age"]),  # prof_age
        gr.update(label=t["lbl_experience"], choices=[(t["lbl_exp_beginner"], "beginner"), (t["lbl_exp_intermediate"], "intermediate"), (t["lbl_exp_advanced"], "advanced")]),  # prof_experience
        gr.update(label=t["lbl_injury_notes"]),  # prof_notes
        gr.update(value=t["btn_save_profile"]),  # prof_save_btn
        gr.update(label=t["lbl_baseline"]),  # prof_baseline
        gr.update(label=t["lbl_gender"], choices=[(t["lbl_gender_male"], "male"), (t["lbl_gender_female"], "female"), (t["lbl_gender_pns"], "prefer_not_to_say"), (t.get("lbl_gender_none", "None"), None)]),  # prof_gender
        gr.update(label=t["sec_goal"]),  # sec_goal_prof
        gr.update(label=t["lbl_goal_type"], choices=[(t["goal_type_race"], "race"), (t["goal_type_volume"], "weekly_volume_km"), (t["goal_type_pace"], "pace")]),  # goal_type
        gr.update(label=t["lbl_target"]),  # goal_target
        gr.update(label=t["lbl_unit"]),  # goal_unit
        gr.update(label=t["lbl_date"]),  # goal_date
        gr.update(value=t["btn_save_goal"]),  # goal_save_btn
        gr.update(label=t["lbl_current_state"]),  # current_state_lbl
        gr.update(value="### " + t["lbl_health_signal"]),  # health_signal_lbl
        gr.update(value="### " + t["lbl_goal_trajectory"]),  # goal_trajectory_lbl
        gr.update(label="π§ " + t["lbl_key_insight"]),  # key_insight_lbl
        gr.update(label="π― " + t["lbl_forward_focus"]),  # forward_focus_lbl
        gr.update(label="ποΈ " + t["lbl_your_week"]),  # your_week_lbl
        gr.update(label=t["lbl_home_week_range"], choices=[(t["current_week_label"], "current"), (t["last_week_label"], "last")]),  # home_week_toggle
        gr.update(label="π§± " + t["sec_structure"]),  # home_structure_md
        gr.update(label=t["sec_goal"]),  # home_goal_title
        gr.update(label=t["rec_title"]),  # home_rec_header
        gr.update(label="π " + t["runner_positioning_title"]),  # home_evidence_header
        gr.update(label="π§ " + t["lbl_key_insight"]),  # home_insights_header
        gr.update(label="π " + t["next_run.title"]),  # home_next_run_header
        gr.update(label="π " + t["performance_first_week_title"]),  # performance_card_group
        gr.update(label="π " + t["sec_chat"]),  # chat_header
        *home_vals_list,
        *pos_vals_list,
    ]
# Re-localize every labeled component (and the dashboards) whenever the
# language changes; output order must match all_localized_outputs.
language_selector.change(
    update_language,
    inputs=[language_selector, orchestrator_state, home_week_toggle],
    outputs=all_localized_outputs,
    show_progress="hidden",
)
async def _ensure_home_brief_lang(orch, week_selection, lang):
    """Ensure the selected week's coach brief exists in the given language.

    Looks up (or lazily builds) the intelligence snapshot for the week's
    Monday and returns a ``(performance_brief, performance_focus)`` pair.
    Returns ``(None, None)`` when no snapshot exists or on any failure.
    """
    try:
        monday = get_start_week_date(week_selection)
        snapshot = await orch.get_or_build_intelligence_snapshot(monday, language=lang)
        if snapshot:
            return snapshot.performance_brief, snapshot.performance_focus
        return None, None
    except Exception as e:
        logger.error(f"Error in _ensure_home_brief_lang: {e}")
        return None, None
async def change_week(orch, week_selection, lang):
    """Rebuild the Home dashboard for the selected week.

    Args:
        orch: Orchestrator from session state.
        week_selection: ``"current"`` or ``"last"``.
        lang: Active UI language code.

    Returns:
        The 28-element tuple produced by ``_format_home_dashboard``, or a
        safe empty-dashboard tuple on any failure so the UI does not crash.
    """
    try:
        # Ensure brief matches current language when switching weeks; this
        # also lazily builds the week's intelligence snapshot as a side
        # effect, so the formatter below finds translated content.
        # (Dead commented-out "lazy intelligence trigger" code removed.)
        await _ensure_home_brief_lang(orch, week_selection, lang)
        return await _format_home_dashboard(orch, language=lang, week_selection=week_selection)
    except Exception as e:
        logger.error(f"Error in change_week: {e}")
        # Return empty dashboard state to avoid UI crash
        t = UI_TEXT.get(lang, UI_TEXT["en"])
        no_data = t.get("error_no_data", "No data available.")
        return (
            f"### {week_selection.title()} Week",  # 0: home_week_range
            "",  # 1: evolution_md
            no_data,  # 2: brief_content
            "",  # 3: focus_content
            no_data,  # 4: snapshot_md
            no_data,  # 5: structure_md
            "",  # 6: goal_summary
            "",  # 7: goal_status_badge
            gr.update(value=0),  # 8: goal_progress_val
            gr.update(visible=False),  # 9: card_html
            None,  # 10: card_state
            gr.update(visible=False),  # 11: btn_download
            gr.update(visible=False),  # 12: next_run_card
            gr.update(visible=False),  # 13: coach_insight
            gr.update(visible=False),  # 14: health_signal
            gr.update(visible=False),  # 15: goal_trajectory
            gr.update(visible=False),  # 16: evidence_md
            gr.update(visible=False),  # 17: recommendation_md
            gr.update(visible=False),  # 18: insights_group
            gr.update(visible=False),  # 19: evidence_group
            gr.update(visible=False),  # 20: rec_group
            gr.update(visible=False),  # 21: goal_group
            gr.update(visible=False),  # 22: structure_group
            gr.update(visible=False),  # 23: performance_card_group
            gr.update(visible=False),  # 24: home_next_run_header
            gr.update(visible=False),  # 25: your_week_group
            gr.update(value=None),  # 26: pace_plot
            gr.update(value=None),  # 27: hr_plot
        )
# Switching the week rebuilds the Home dashboard. Note this binds
# change_week_with_progress (a wrapper defined elsewhere that also drives
# progress_box), not change_week directly.
home_week_toggle.change(
    change_week_with_progress,
    inputs=[orchestrator_state, home_week_toggle, language_selector],
    outputs=[progress_box, *home_outputs],
    show_progress="hidden",
)
# Expose the inner handlers as attributes of build_interface — presumably so
# tests/other modules can reach them without rebuilding the UI; confirm
# callers before removing.
build_interface.initialize_app = initialize_app
build_interface.update_language = update_language
build_interface._ensure_home_brief_lang = _ensure_home_brief_lang
build_interface.change_week = change_week
logger.info(f"Build Interface render time: {time.time() - start_time:.2f}s")
return demo
def launch_app() -> None:
    """Launch the Gradio UI.

    This is the canonical entrypoint for Docker/HuggingFace Spaces.

    Port selection: if running in an HF Space or the user sets
    GRADIO_SERVER_PORT/PORT, bind to that port; otherwise let Gradio pick a
    free local port.
    """
    from dotenv import load_dotenv

    # Load env from .env when running locally; in HF Spaces env is provided via Settings.
    load_dotenv(override=True)
    import config

    # User can explicitly force a port via GRADIO_SERVER_PORT or PORT
    # (HF Spaces sets PORT).
    explicit_port = os.getenv("GRADIO_SERVER_PORT") or os.getenv("PORT")
    # Avoid opening a browser in containers.
    inbrowser = not config.is_hf_space()
    # Default debug to false unless explicitly enabled.
    debug = os.getenv("DEBUG", "false").lower() in {"1", "true", "yes"}
    demo = build_interface()

    # Cloudflare Web Analytics, injected into the page <head>.
    analytics_js = """<!-- Cloudflare Web Analytics -->
<script defer src='https://static.cloudflareinsights.com/beacon.min.js' data-cf-beacon='{"token": "03ef8be3646e4e25bb990f907377bc55", "spa": true, "version": 2}'></script>
<!-- End Cloudflare Web Analytics -->"""
    demo.head = analytics_js  # This adds the script to the <head> section

    if config.is_hf_space() or explicit_port:
        # Bug fix: `port` was computed here but never passed to launch(),
        # so the explicit/platform port was silently ignored.
        port = int(explicit_port or "7860")
        demo.launch(server_name="0.0.0.0", server_port=port, debug=debug, inbrowser=inbrowser)
    else:
        # Local dev run: let Gradio choose an available port and open a browser.
        demo.launch(server_name="0.0.0.0", debug=debug, inbrowser=True)
# Script entrypoint: launch the app when executed directly.
if __name__ == "__main__":
    launch_app()