Spaces:
Running
Running
Commit ·
d64fd55
0
Parent(s):
HF Space deploy snapshot (minimal allow-list)
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- Dockerfile +27 -0
- README.md +19 -0
- app.py +13 -0
- configs/prompt_templates.json +76 -0
- docs/readme_hf.md +19 -0
- pyproject.toml +56 -0
- requirements.txt +150 -0
- schemas/prompt_templates.schema.json +44 -0
- src/__init__.py +0 -0
- src/_app/__init__.py +0 -0
- src/_app/__main__.py +53 -0
- src/_app/gradio_app.py +0 -0
- src/_app/presentation/ui_text.py +659 -0
- src/_app/ui/coaching_helpers.py +157 -0
- src/agents/base.py +9 -0
- src/agents/chat_agent.py +161 -0
- src/agents/feature_engineering/agent.py +44 -0
- src/agents/file_integrity/agent.py +6 -0
- src/agents/guardrail_agent.py +132 -0
- src/agents/insights/agent.py +204 -0
- src/agents/orchestrator.py +1170 -0
- src/agents/plan/agent.py +165 -0
- src/agents/visualization/__init__.py +3 -0
- src/agents/visualization/agent.py +114 -0
- src/application/dto/runner_api_response.py +25 -0
- src/application/goal_trajectory_service.py +10 -0
- src/application/positioning_service.py +187 -0
- src/application/recommendation_service.py +82 -0
- src/application/runner_positioning_service.py +43 -0
- src/config.py +109 -0
- src/core/intelligence/intelligence_builder.py +308 -0
- src/core/intelligence/intelligence_serializer.py +49 -0
- src/core/intelligence/runner_intelligence_snapshot.py +82 -0
- src/core/pipeline/context.py +76 -0
- src/core/pipeline/pipeline.py +43 -0
- src/core/pipeline/step.py +12 -0
- src/domain/goals/goal_trajectory.py +13 -0
- src/domain/goals/goal_trajectory_engine.py +81 -0
- src/domain/runner/goal.py +30 -0
- src/domain/runner/profile.py +31 -0
- src/domain/runner_positioning.py +101 -0
- src/domain/training/agent_models.py +79 -0
- src/domain/training/charts.py +22 -0
- src/domain/training/period_comparison.py +12 -0
- src/domain/training/planned_session.py +23 -0
- src/domain/training/run.py +23 -0
- src/domain/training/training_recommendation.py +12 -0
- src/domain/training/trend_snapshot.py +48 -0
- src/domain/training/weekly_snapshot.py +62 -0
- src/domain/training/weekly_snapshot_builder.py +60 -0
Dockerfile
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
|
| 2 |
+
|
| 3 |
+
ENV PYTHONDONTWRITEBYTECODE=1 \
|
| 4 |
+
PYTHONUNBUFFERED=1 \
|
| 5 |
+
PIP_NO_CACHE_DIR=1 \
|
| 6 |
+
HF_SPACE=true \
|
| 7 |
+
STORAGE_ENABLED=false
|
| 8 |
+
|
| 9 |
+
WORKDIR /app
|
| 10 |
+
|
| 11 |
+
# System deps (keep minimal; add build-essential only if you need wheels compiled)
|
| 12 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 13 |
+
git \
|
| 14 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 15 |
+
|
| 16 |
+
# Copy code
|
| 17 |
+
COPY . /app
|
| 18 |
+
|
| 19 |
+
# Install dependencies
|
| 20 |
+
# Prefer requirements.txt if you have it; otherwise generate one from pyproject for Docker usage.
|
| 21 |
+
RUN pip install --upgrade pip \
|
| 22 |
+
&& pip install -r requirements.txt
|
| 23 |
+
|
| 24 |
+
# HuggingFace Spaces uses port 7860
|
| 25 |
+
EXPOSE 7860
|
| 26 |
+
|
| 27 |
+
CMD ["python", "app.py"]
|
README.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Runner Agentic Intelligence
|
| 3 |
+
emoji: 🏃♂️
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Runner Agentic Intelligence (Public Preview)
|
| 11 |
+
|
| 12 |
+
Upload your Strava exports (TCX / FIT / FIT.GZ) to generate insights and a weekly plan.
|
| 13 |
+
|
| 14 |
+
**Notes**
|
| 15 |
+
- This is an experimental demo
|
| 16 |
+
- Session data is temporary
|
| 17 |
+
- Not medical advice
|
| 18 |
+
|
| 19 |
+
For local installation and full project docs, see the GitHub repository.
|
app.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Docker / HuggingFace Spaces entrypoint.
|
| 2 |
+
|
| 3 |
+
This file intentionally does NOT force demo-mode defaults.
|
| 4 |
+
|
| 5 |
+
- In HuggingFace Spaces / Docker, set env vars (e.g., HF_SPACE=true, STORAGE_ENABLED=false)
|
| 6 |
+
via Space Settings or Dockerfile ENV.
|
| 7 |
+
- For local development, you can run `python src/app/gradio_app.py` directly.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from src._app.gradio_app import launch_app
|
| 11 |
+
|
| 12 |
+
if __name__ == "__main__":
|
| 13 |
+
launch_app()
|
configs/prompt_templates.json
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"templates": [
|
| 3 |
+
{
|
| 4 |
+
"id": "insights_latest",
|
| 5 |
+
"category": "Insights",
|
| 6 |
+
"label": "Latest Run Insights",
|
| 7 |
+
"description": "Summarise the most recent run with actionable coaching insights.",
|
| 8 |
+
"templateBody": "Based on the most recent uploaded run(s), provide:\n- key observations that materially affect training decisions,\n- any elevated risk signals,\n- the single primary lever I should focus on next.",
|
| 9 |
+
"example": "Latest Run Insights — key observations, risk signal, primary lever"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"id": "insights_trends",
|
| 13 |
+
"category": "Insights",
|
| 14 |
+
"label": "Training Trends",
|
| 15 |
+
"description": "Identify performance and fatigue trends across recent runs.",
|
| 16 |
+
"templateBody": "Look at my recent runs and identify trends in pace, consistency, and fatigue. Highlight anything that should influence my next training decisions.",
|
| 17 |
+
"example": "Training Trends — how my pacing and fatigue have changed recently"
|
| 18 |
+
},
|
| 19 |
+
{
|
| 20 |
+
"id": "insights_risk",
|
| 21 |
+
"category": "Insights",
|
| 22 |
+
"label": "Risk Assessment",
|
| 23 |
+
"description": "Check for injury or fatigue risk based on recent data.",
|
| 24 |
+
"templateBody": "Based on my recent runs, assess whether there are any injury, fatigue, or stagnation risks. If none, explicitly state that no elevated risk is detected.",
|
| 25 |
+
"example": "Risk Assessment — is there any elevated injury or fatigue risk?"
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"id": "plan_weekly_standard",
|
| 29 |
+
"category": "Weekly Plan",
|
| 30 |
+
"label": "Standard Weekly Plan",
|
| 31 |
+
"description": "Generate a balanced 7-day running plan based on recent data.",
|
| 32 |
+
"templateBody": "Using my recent runs and current fitness, generate a 7-day running plan (Monday–Sunday) with a brief summary, daily sessions, and one focus reminder for the week.",
|
| 33 |
+
"example": "Weekly Plan — generate a full week plan from my recent runs"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"id": "plan_weekly_constraints",
|
| 37 |
+
"category": "Weekly Plan",
|
| 38 |
+
"label": "Weekly Plan with Constraints",
|
| 39 |
+
"description": "Generate a weekly plan with explicit constraints.",
|
| 40 |
+
"templateBody": "Generate a weekly running plan with the following constraints:\n- no hard runs on weekdays,\n- limited total training time,\n- prioritise injury risk reduction.\nExplain any trade-offs made.",
|
| 41 |
+
"example": "Plan with Constraints — no hard runs on weekdays, limited time"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"id": "history_recent_insights",
|
| 45 |
+
"category": "History",
|
| 46 |
+
"label": "Recent Stored Insights",
|
| 47 |
+
"description": "Retrieve and summarise recently persisted insights.",
|
| 48 |
+
"templateBody": "Retrieve the most recent stored insights and summarise them with timestamps and key messages.",
|
| 49 |
+
"example": "Recent Stored Insights — show the last 4 insights"
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"id": "history_insights_by_date",
|
| 53 |
+
"category": "History",
|
| 54 |
+
"label": "Insights by Date",
|
| 55 |
+
"description": "Retrieve stored insights for a specific date or range.",
|
| 56 |
+
"templateBody": "Retrieve stored insights for the specified date or date range and summarise the key coaching messages.",
|
| 57 |
+
"example": "Insights by Date — show insights from last week"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"id": "system_router_decision",
|
| 61 |
+
"category": "System",
|
| 62 |
+
"label": "Router Decision Explanation",
|
| 63 |
+
"description": "Explain how the system routed the last request.",
|
| 64 |
+
"templateBody": "For the most recent analysis, explain which route was chosen (chat or chart) and why. Include any fallback reason if applicable.",
|
| 65 |
+
"example": "Router Decision — what route was chosen and why?"
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"id": "chat_load_prior_insights",
|
| 69 |
+
"category": "Chat",
|
| 70 |
+
"label": "Load Prior Insights",
|
| 71 |
+
"description": "Force the assistant to include recent stored insights in its reasoning.",
|
| 72 |
+
"templateBody": "Before answering, load my recent stored insights and include them explicitly in your reasoning.",
|
| 73 |
+
"example": "Load prior insights before answering my question"
|
| 74 |
+
}
|
| 75 |
+
]
|
| 76 |
+
}
|
docs/readme_hf.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Runner Agentic Intelligence
|
| 3 |
+
emoji: 🏃♂️
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Runner Agentic Intelligence (Public Preview)
|
| 11 |
+
|
| 12 |
+
Upload your Strava exports (TCX / FIT / FIT.GZ) to generate insights and a weekly plan.
|
| 13 |
+
|
| 14 |
+
**Notes**
|
| 15 |
+
- This is an experimental demo
|
| 16 |
+
- Session data is temporary
|
| 17 |
+
- Not medical advice
|
| 18 |
+
|
| 19 |
+
For local installation and full project docs, see the GitHub repository.
|
pyproject.toml
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "runner-agentic-intelligence"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Agentic Intelligence for Runners MVP — Strava/Apple Health ingestion + insights + weekly plans powered by multi-agent system."
|
| 5 |
+
authors = [
|
| 6 |
+
{ name = "Alexandre Franco" }
|
| 7 |
+
]
|
| 8 |
+
readme = "README.md"
|
| 9 |
+
requires-python = ">=3.10"
|
| 10 |
+
|
| 11 |
+
# Runtime dependencies
|
| 12 |
+
dependencies = [
|
| 13 |
+
"gradio",
|
| 14 |
+
"matplotlib",
|
| 15 |
+
"lxml",
|
| 16 |
+
"gpxpy",
|
| 17 |
+
"pandas", # useful for aggregations if needed
|
| 18 |
+
"numpy", # lightweight processing
|
| 19 |
+
"google-adk>=1.18.0",
|
| 20 |
+
"opentelemetry-instrumentation-google-genai>=0.4b0",
|
| 21 |
+
"vertexai>=1.43.0",
|
| 22 |
+
"black>=25.11.0",
|
| 23 |
+
"flake8>=7.3.0",
|
| 24 |
+
"litellm",
|
| 25 |
+
"pydantic>=2.11.10",
|
| 26 |
+
"fitdecode>=0.10.0",
|
| 27 |
+
"jsonschema>=4.21.0",
|
| 28 |
+
]
|
| 29 |
+
|
| 30 |
+
[project.optional-dependencies]
|
| 31 |
+
dev = [
|
| 32 |
+
"black",
|
| 33 |
+
"flake8",
|
| 34 |
+
"pytest",
|
| 35 |
+
"pytest-asyncio",
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
[tool.black]
|
| 39 |
+
line-length = 100
|
| 40 |
+
target-version = ["py310"]
|
| 41 |
+
|
| 42 |
+
[tool.flake8]
|
| 43 |
+
max-line-length = 100
|
| 44 |
+
extend-ignore = ["E203"]
|
| 45 |
+
exclude = [
|
| 46 |
+
".venv",
|
| 47 |
+
"build",
|
| 48 |
+
"dist",
|
| 49 |
+
"__pycache__",
|
| 50 |
+
]
|
| 51 |
+
|
| 52 |
+
[tool.uv]
|
| 53 |
+
# ensures uv uses a dedicated .venv folder inside project
|
| 54 |
+
# (HF Spaces ignores local venvs but uv handles it seamlessly)
|
| 55 |
+
index-strategy = "first-index"
|
| 56 |
+
cache-dir = ".uv-cache"
|
requirements.txt
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
aiofiles==24.1.0
|
| 2 |
+
aiohappyeyeballs==2.6.1
|
| 3 |
+
aiohttp==3.13.3
|
| 4 |
+
aiosignal==1.4.0
|
| 5 |
+
alembic==1.17.2
|
| 6 |
+
annotated-doc==0.0.4
|
| 7 |
+
annotated-types==0.7.0
|
| 8 |
+
anyio==4.11.0
|
| 9 |
+
attrs==25.4.0
|
| 10 |
+
authlib==1.6.5
|
| 11 |
+
black==25.11.0
|
| 12 |
+
brotli==1.2.0
|
| 13 |
+
cachetools==6.2.2
|
| 14 |
+
certifi==2025.11.12
|
| 15 |
+
cffi==2.0.0
|
| 16 |
+
charset-normalizer==3.4.4
|
| 17 |
+
click==8.3.1
|
| 18 |
+
cloudpickle==3.1.2
|
| 19 |
+
contourpy==1.3.3
|
| 20 |
+
cryptography==46.0.3
|
| 21 |
+
cycler==0.12.1
|
| 22 |
+
distro==1.9.0
|
| 23 |
+
docstring-parser==0.17.0
|
| 24 |
+
fastapi==0.121.2
|
| 25 |
+
fastuuid==0.14.0
|
| 26 |
+
ffmpy==1.0.0
|
| 27 |
+
filelock==3.20.0
|
| 28 |
+
fitdecode==0.11.0
|
| 29 |
+
flake8==7.3.0
|
| 30 |
+
fonttools==4.60.1
|
| 31 |
+
frozenlist==1.8.0
|
| 32 |
+
fsspec==2025.10.0
|
| 33 |
+
googleapis-common-protos==1.72.0
|
| 34 |
+
gpxpy==1.6.2
|
| 35 |
+
gradio==5.49.1
|
| 36 |
+
gradio-client==1.13.3
|
| 37 |
+
graphviz==0.21
|
| 38 |
+
greenlet==3.2.4
|
| 39 |
+
groovy==0.1.2
|
| 40 |
+
grpc-interceptor==0.15.4
|
| 41 |
+
grpcio==1.76.0
|
| 42 |
+
grpcio-status==1.76.0
|
| 43 |
+
h11==0.16.0
|
| 44 |
+
hf-xet==1.2.0
|
| 45 |
+
httpcore==1.0.9
|
| 46 |
+
httplib2==0.31.0
|
| 47 |
+
httpx==0.28.1
|
| 48 |
+
httpx-sse==0.4.3
|
| 49 |
+
huggingface-hub==1.1.4
|
| 50 |
+
idna==3.11
|
| 51 |
+
importlib-metadata==8.7.0
|
| 52 |
+
iniconfig==2.3.0
|
| 53 |
+
jinja2==3.1.6
|
| 54 |
+
jiter==0.12.0
|
| 55 |
+
jsonschema==4.25.1
|
| 56 |
+
jsonschema-specifications==2025.9.1
|
| 57 |
+
kiwisolver==1.4.9
|
| 58 |
+
litellm==1.81.3
|
| 59 |
+
lxml==6.0.2
|
| 60 |
+
mako==1.3.10
|
| 61 |
+
markdown-it-py==4.0.0
|
| 62 |
+
markupsafe==3.0.3
|
| 63 |
+
matplotlib==3.10.7
|
| 64 |
+
mccabe==0.7.0
|
| 65 |
+
mcp==1.21.2
|
| 66 |
+
mdurl==0.1.2
|
| 67 |
+
multidict==6.7.1
|
| 68 |
+
mypy-extensions==1.1.0
|
| 69 |
+
numpy==2.3.5
|
| 70 |
+
openai==2.15.0
|
| 71 |
+
opentelemetry-api==1.37.0
|
| 72 |
+
opentelemetry-exporter-gcp-logging==1.11.0a0
|
| 73 |
+
opentelemetry-exporter-gcp-monitoring==1.11.0a0
|
| 74 |
+
opentelemetry-exporter-gcp-trace==1.11.0
|
| 75 |
+
opentelemetry-exporter-otlp-proto-common==1.37.0
|
| 76 |
+
opentelemetry-exporter-otlp-proto-http==1.37.0
|
| 77 |
+
opentelemetry-instrumentation==0.58b0
|
| 78 |
+
opentelemetry-proto==1.37.0
|
| 79 |
+
opentelemetry-resourcedetector-gcp==1.11.0a0
|
| 80 |
+
opentelemetry-sdk==1.37.0
|
| 81 |
+
opentelemetry-semantic-conventions==0.58b0
|
| 82 |
+
opentelemetry-util-genai==0.2b0
|
| 83 |
+
orjson==3.11.4
|
| 84 |
+
packaging==25.0
|
| 85 |
+
pandas==2.3.3
|
| 86 |
+
pathspec==0.12.1
|
| 87 |
+
pillow==11.3.0
|
| 88 |
+
platformdirs==4.5.0
|
| 89 |
+
pluggy==1.6.0
|
| 90 |
+
propcache==0.4.1
|
| 91 |
+
proto-plus==1.26.1
|
| 92 |
+
protobuf==6.33.1
|
| 93 |
+
pyasn1==0.6.1
|
| 94 |
+
pyasn1-modules==0.4.2
|
| 95 |
+
pycodestyle==2.14.0
|
| 96 |
+
pycparser==2.23
|
| 97 |
+
pydantic==2.11.10
|
| 98 |
+
pydantic-core==2.33.2
|
| 99 |
+
pydantic-settings==2.12.0
|
| 100 |
+
pydub==0.25.1
|
| 101 |
+
pyflakes==3.4.0
|
| 102 |
+
pygments==2.19.2
|
| 103 |
+
pyjwt==2.10.1
|
| 104 |
+
pyparsing==3.2.5
|
| 105 |
+
pytest==9.0.2
|
| 106 |
+
pytest-asyncio==1.3.0
|
| 107 |
+
python-dateutil==2.9.0.post0
|
| 108 |
+
python-dotenv==1.2.1
|
| 109 |
+
python-multipart==0.0.20
|
| 110 |
+
pytokens==0.3.0
|
| 111 |
+
pytz==2025.2
|
| 112 |
+
pyyaml==6.0.3
|
| 113 |
+
referencing==0.37.0
|
| 114 |
+
regex==2026.1.15
|
| 115 |
+
requests==2.32.5
|
| 116 |
+
rich==14.2.0
|
| 117 |
+
rpds-py==0.29.0
|
| 118 |
+
rsa==4.9.1
|
| 119 |
+
ruff==0.14.5
|
| 120 |
+
safehttpx==0.1.7
|
| 121 |
+
semantic-version==2.10.0
|
| 122 |
+
shapely==2.1.2
|
| 123 |
+
shellingham==1.5.4
|
| 124 |
+
six==1.17.0
|
| 125 |
+
sniffio==1.3.1
|
| 126 |
+
sqlalchemy==2.0.44
|
| 127 |
+
sqlalchemy-spanner==1.17.1
|
| 128 |
+
sqlparse==0.5.3
|
| 129 |
+
sse-starlette==3.0.3
|
| 130 |
+
starlette==0.49.3
|
| 131 |
+
tenacity==9.1.2
|
| 132 |
+
tiktoken==0.12.0
|
| 133 |
+
tokenizers==0.22.2
|
| 134 |
+
tomlkit==0.13.3
|
| 135 |
+
tqdm==4.67.1
|
| 136 |
+
typer==0.20.0
|
| 137 |
+
typer-slim==0.20.0
|
| 138 |
+
typing-extensions==4.15.0
|
| 139 |
+
typing-inspection==0.4.2
|
| 140 |
+
tzdata==2025.2
|
| 141 |
+
tzlocal==5.3.1
|
| 142 |
+
uritemplate==4.2.0
|
| 143 |
+
urllib3==2.5.0
|
| 144 |
+
uvicorn==0.38.0
|
| 145 |
+
vertexai==1.43.0
|
| 146 |
+
watchdog==6.0.0
|
| 147 |
+
websockets==15.0.1
|
| 148 |
+
wrapt==1.17.3
|
| 149 |
+
yarl==1.22.0
|
| 150 |
+
zipp==3.23.0
|
schemas/prompt_templates.schema.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "http://json-schema.org/draft-07/schema#",
|
| 3 |
+
"title": "PromptTemplatesConfig",
|
| 4 |
+
"type": "object",
|
| 5 |
+
"properties": {
|
| 6 |
+
"templates": {
|
| 7 |
+
"type": "array",
|
| 8 |
+
"items": {
|
| 9 |
+
"type": "object",
|
| 10 |
+
"properties": {
|
| 11 |
+
"id": {
|
| 12 |
+
"type": "string"
|
| 13 |
+
},
|
| 14 |
+
"category": {
|
| 15 |
+
"type": "string"
|
| 16 |
+
},
|
| 17 |
+
"label": {
|
| 18 |
+
"type": "string"
|
| 19 |
+
},
|
| 20 |
+
"description": {
|
| 21 |
+
"type": "string"
|
| 22 |
+
},
|
| 23 |
+
"templateBody": {
|
| 24 |
+
"type": "string"
|
| 25 |
+
},
|
| 26 |
+
"example": {
|
| 27 |
+
"type": "string"
|
| 28 |
+
}
|
| 29 |
+
},
|
| 30 |
+
"required": [
|
| 31 |
+
"id",
|
| 32 |
+
"category",
|
| 33 |
+
"label",
|
| 34 |
+
"templateBody"
|
| 35 |
+
],
|
| 36 |
+
"additionalProperties": false
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
},
|
| 40 |
+
"required": [
|
| 41 |
+
"templates"
|
| 42 |
+
],
|
| 43 |
+
"additionalProperties": false
|
| 44 |
+
}
|
src/__init__.py
ADDED
|
File without changes
|
src/_app/__init__.py
ADDED
|
File without changes
|
src/_app/__main__.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# src/app/__main__.py
|
| 2 |
+
import argparse
|
| 3 |
+
import sys
|
| 4 |
+
import json
|
| 5 |
+
import dataclasses
|
| 6 |
+
|
| 7 |
+
# Deferred import
|
| 8 |
+
# from .gradio_app import main
|
| 9 |
+
from prompts.store import get_template_store
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def handle_prompts(args):
|
| 13 |
+
try:
|
| 14 |
+
store = get_template_store()
|
| 15 |
+
except Exception as e:
|
| 16 |
+
print(json.dumps({"error": f"Failed to load templates: {str(e)}"}), file=sys.stderr)
|
| 17 |
+
sys.exit(1)
|
| 18 |
+
|
| 19 |
+
if args.list_prompts:
|
| 20 |
+
templates = []
|
| 21 |
+
if args.category:
|
| 22 |
+
templates = store.list_by_category(args.category)
|
| 23 |
+
else:
|
| 24 |
+
templates = store.list_templates()
|
| 25 |
+
|
| 26 |
+
output = {"count": len(templates), "templates": [dataclasses.asdict(t) for t in templates]}
|
| 27 |
+
print(json.dumps(output, indent=2))
|
| 28 |
+
sys.exit(0)
|
| 29 |
+
|
| 30 |
+
if args.get_prompt:
|
| 31 |
+
template = store.get_template(args.get_prompt)
|
| 32 |
+
if not template:
|
| 33 |
+
print(json.dumps({"error": f"Template not found: {args.get_prompt}"}), file=sys.stderr)
|
| 34 |
+
sys.exit(1)
|
| 35 |
+
print(json.dumps(dataclasses.asdict(template), indent=2))
|
| 36 |
+
sys.exit(0)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
if __name__ == "__main__":
|
| 40 |
+
parser = argparse.ArgumentParser(description="Runner Agentic Intelligence")
|
| 41 |
+
parser.add_argument("--list-prompts", action="store_true", help="List all prompt templates")
|
| 42 |
+
parser.add_argument("--category", type=str, help="Filter templates by category")
|
| 43 |
+
parser.add_argument("--get-prompt", type=str, help="Get a specific prompt template by ID")
|
| 44 |
+
|
| 45 |
+
# Parse known args to avoid conflict with Gradio or other potential subcommands
|
| 46 |
+
args, unknown = parser.parse_known_args()
|
| 47 |
+
|
| 48 |
+
if args.list_prompts or args.get_prompt:
|
| 49 |
+
handle_prompts(args)
|
| 50 |
+
else:
|
| 51 |
+
from .gradio_app import launch_app
|
| 52 |
+
|
| 53 |
+
launch_app()
|
src/_app/gradio_app.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/_app/presentation/ui_text.py
ADDED
|
@@ -0,0 +1,659 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# src/domain/i18n.py
|
| 2 |
+
from typing import Dict, Any
|
| 3 |
+
|
| 4 |
+
# UI Localization Text
|
| 5 |
+
UI_TEXT = {
|
| 6 |
+
"en": {
|
| 7 |
+
"welcome": "Welcome to Runner Intelligence. Upload your first run and the system will begin building your training baseline and generating insights.",
|
| 8 |
+
"title": "# 🏃 Runner Agentic Intelligence (Experimental)",
|
| 9 |
+
"subtitle": '<span class="muted">Personalized coaching from your training data.</span>',
|
| 10 |
+
"workflow": '<span class="muted"><b>Workflow:</b> Upload runs ➔ Get Insights ➔ Review Weekly Plan ➔ Chat with Coach</span><br/><span class="muted"><i>Supported formats: .fit, .fit.gz, .tcx, .tcx.gz, .gpx (max 50MB per file)</i></span><br/><span class="muted">Feedback: <a href="https://github.com/ideas-to-life/runner-agentic-intelligence/issues" target="_blank">GitHub Issues</a> or DM me</span>',
|
| 11 |
+
"evolution_workflow": '<span class="muted">Track your evolution. Review your weekly check-in and progress vs last month.</span><br/><span class="muted">Upload new runs anytime — your dashboard updates automatically.</span><br/><span class="muted">Feedback: <a href="https://github.com/ideas-to-life/runner-agentic-intelligence/issues" target="_blank">GitHub Issues</a> or DM me</span>',
|
| 12 |
+
"tab_analyse": "Analyse",
|
| 13 |
+
"tab_results": "Results",
|
| 14 |
+
"tab_coach": "Coach",
|
| 15 |
+
"tab_home": "Home",
|
| 16 |
+
"tab_intelligence": "Intelligence",
|
| 17 |
+
"tab_positioning": "Positioning",
|
| 18 |
+
"lbl_your_week": "Your Week",
|
| 19 |
+
"lbl_current_state": "Current State",
|
| 20 |
+
"lbl_key_insight": "Key Insight",
|
| 21 |
+
"lbl_forward_focus": "This week's Focus",
|
| 22 |
+
"lbl_details": "Details",
|
| 23 |
+
"lbl_coach_brief": "Coach Brief",
|
| 24 |
+
"lbl_parsed": "Parsed",
|
| 25 |
+
"unit_weeks": "weeks",
|
| 26 |
+
"lbl_across": "across",
|
| 27 |
+
"lbl_current": "Current",
|
| 28 |
+
"lbl_previous": "Previous",
|
| 29 |
+
"lbl_distance": "Distance",
|
| 30 |
+
"lbl_runs": "Runs",
|
| 31 |
+
"lbl_avg_pace": "Avg Pace",
|
| 32 |
+
"lbl_consistency": "Consistency",
|
| 33 |
+
"goal_status_template": "Your goal trajectory is **{val}**.",
|
| 34 |
+
"home_story_template": "You ran **{count}** {unit} this week for a {of} **{dist:.1f} km**.",
|
| 35 |
+
"sec_evolution": "📈 Progress vs Last Month",
|
| 36 |
+
"sec_checkin": "## 🧠 Weekly Check-In",
|
| 37 |
+
"lbl_goal_traj_inline": "**🎯 Trajectory:**",
|
| 38 |
+
"lbl_focus_inline": "**🎯 Focus:**",
|
| 39 |
+
"sec_snapshot": "## 📅 Current Week Snapshot",
|
| 40 |
+
"sec_upload": "🗂️ Upload your training data",
|
| 41 |
+
"upload_hints": '<div class="muted" style="margin-bottom: 12px; font-size: 0.9rem;"><ul><li>Best results: upload 4–12 recent runs (or ~2–6 weeks).</li><li>Include HR data if you have it (helps fatigue/risk signals).</li><li>Then click ‘Analyse Data’ to populate Results + Coach context.</li></ul></div>',
|
| 42 |
+
"upload_label": "Drag and drop or click to upload",
|
| 43 |
+
"btn_analyse": "🚀 Analyse Data",
|
| 44 |
+
"btn_reset": "Reset",
|
| 45 |
+
"sec_insights": "Coaching Insights",
|
| 46 |
+
"lbl_risk": "Safety/Risk Assessment",
|
| 47 |
+
"lbl_lever": "Primary Lever",
|
| 48 |
+
"sec_plan": "Weekly Training Plan",
|
| 49 |
+
"sec_charts": "Trends & Analytics",
|
| 50 |
+
"sec_chat": "Ask about your training",
|
| 51 |
+
"chat_indicator_no": "⚠️ *Upload and analyze runs to enable chat context.*",
|
| 52 |
+
"chat_indicator_yes": "✅ *Chat context enabled.*",
|
| 53 |
+
"chat_tip": "Tip: try ‘Plot my pace’ or ‘Show HR chart’ to visualize your data.",
|
| 54 |
+
"chat_placeholder": "e.g., How was my pace last week?",
|
| 55 |
+
"btn_send": "Send",
|
| 56 |
+
"btn_clear": "Clear History",
|
| 57 |
+
"lang_label": "Language / Idioma",
|
| 58 |
+
"starter_limiter": "Biggest limiter?",
|
| 59 |
+
"starter_why": "Why this focus?",
|
| 60 |
+
"starter_fatigue": "Fatigue risk?",
|
| 61 |
+
"starter_keep": "What to keep doing?",
|
| 62 |
+
"chat_starter_limiter": "What is my biggest training limiter?",
|
| 63 |
+
"chat_starter_why": "Why did you suggest this training focus?",
|
| 64 |
+
"chat_starter_fatigue": "Do you see any signs of overtraining or fatigue?",
|
| 65 |
+
"chat_starter_keep": "What aspects of my training should I keep doing?",
|
| 66 |
+
"analyse_progress": "Analysing training data...",
|
| 67 |
+
"insights_primary_lever_heading": "Primary Lever",
|
| 68 |
+
"insights_risk_signal_heading": "Risk Signal",
|
| 69 |
+
"insights_key_observations_heading": "Key Observations",
|
| 70 |
+
"insights_evidence_label": "Evidence",
|
| 71 |
+
"insights_constraint_label": "Constraint",
|
| 72 |
+
"insights_no_primary_lever": "No primary lever identified.",
|
| 73 |
+
"insights_no_risk": "No elevated risk detected.",
|
| 74 |
+
"insights_no_observations": "*No observations provided.*",
|
| 75 |
+
"insights_analysis_failed": "*Analysis Failed*",
|
| 76 |
+
"insights_analysis_pending": "*Analysis pending...*",
|
| 77 |
+
"insights_raw_default": "Raw insights will appear here.",
|
| 78 |
+
"plan_week_summary_label": "Week summary:",
|
| 79 |
+
"plan_focus_label": "This week's focus:",
|
| 80 |
+
"plan_pending": "*Plan will appear here after analysis.*",
|
| 81 |
+
"progress_extracting": "Extracting features from run data...",
|
| 82 |
+
"progress_generating": "Generating insights and training plan...",
|
| 83 |
+
"pipeline_stage_1": "🏃 Uploading runs",
|
| 84 |
+
"pipeline_stage_2": "📊 Building weekly snapshot",
|
| 85 |
+
"pipeline_stage_3": "📈 Analyzing training trends",
|
| 86 |
+
"pipeline_stage_4": "🧠 Generating coach insights",
|
| 87 |
+
"pipeline_stage_5": "📋 Preparing training recommendation",
|
| 88 |
+
"init_stage_1": "⚙️ Initializing services...",
|
| 89 |
+
"init_stage_2": "👤 Loading runner profile...",
|
| 90 |
+
"init_stage_3": "📂 Loading training history...",
|
| 91 |
+
"init_stage_4": "🏠 Building dashboard...",
|
| 92 |
+
"week_stage_1": "📅 Switching week...",
|
| 93 |
+
"week_stage_2": "📊 Building weekly snapshot...",
|
| 94 |
+
"week_stage_3": "🏠 Updating dashboard...",
|
| 95 |
+
"chat_context_indicator": "🧠 **Using your training insights as context**",
|
| 96 |
+
"error_no_runs": "No runs to process.",
|
| 97 |
+
"lbl_details": "Details (Risk & Observations)",
|
| 98 |
+
"banner_title": "⚠️ Public Preview Information",
|
| 99 |
+
"banner_session": "**Session Data**: All uploaded data and chat history are temporary and destroyed after the session.",
|
| 100 |
+
"banner_persistence_disabled": "🟡 **Public Preview Mode** — No data is persisted. All uploads and derived metrics are temporary.",
|
| 101 |
+
"banner_persistence_enabled": "🟢 **Training History Persistence Enabled** — Weekly snapshots and trends are stored locally.",
|
| 102 |
+
"banner_medical": "**Medical Advice**: This system provides coaching insights, NOT medical advice. Consult a professional before starting any training plan.",
|
| 103 |
+
"banner_full": "**Full Experience**: For full persistence and local storage, run the app locally from [GitHub](https://github.com/avfranco/runner-agentic-intelligence).",
|
| 104 |
+
"insights_timestamp_label": "Insights generated on",
|
| 105 |
+
"risk_level_low": "LOW",
|
| 106 |
+
"risk_level_medium": "MEDIUM",
|
| 107 |
+
"risk_level_high": "HIGH",
|
| 108 |
+
"chat_error": "I'm sorry, I'm having trouble processing your request right now.",
|
| 109 |
+
"plan_error": "Could not generate plan at this time.",
|
| 110 |
+
"tab_profile": "Profile",
|
| 111 |
+
"sec_profile": "## Runner Profile (Digital Twin v1)",
|
| 112 |
+
"lbl_display_name": "Display Name",
|
| 113 |
+
"lbl_age": "Age",
|
| 114 |
+
"lbl_experience": "Experience Level",
|
| 115 |
+
"lbl_injury_notes": "Injury History / Notes",
|
| 116 |
+
"btn_save_profile": "Save Profile",
|
| 117 |
+
"profile_saved": "Profile saved successfully!",
|
| 118 |
+
"profile_save_error": "Error saving profile.",
|
| 119 |
+
"lbl_baseline": "Baseline Weekly Distance (km)",
|
| 120 |
+
"lbl_gender": "Gender",
|
| 121 |
+
"sec_goal": "Active Goal",
|
| 122 |
+
"lbl_goal_type": "Goal Type",
|
| 123 |
+
"lbl_target": "Target Value",
|
| 124 |
+
"lbl_unit": "Unit",
|
| 125 |
+
"lbl_date": "Target Date",
|
| 126 |
+
"btn_save_goal": "Set Goal",
|
| 127 |
+
"goal_saved": "Goal updated!",
|
| 128 |
+
"lbl_progress": "Progress",
|
| 129 |
+
"lbl_total_distance": "Distance",
|
| 130 |
+
"lbl_distance": "Distance",
|
| 131 |
+
"lbl_avg_pace": "Avg Pace",
|
| 132 |
+
"lbl_avg_hr": "Avg HR",
|
| 133 |
+
"lbl_runs_count": "Runs",
|
| 134 |
+
"goal_trajectory_title": "Goal Trajectory",
|
| 135 |
+
"goal_status": "Status",
|
| 136 |
+
"goal_progress": "Progress",
|
| 137 |
+
"goal_next_milestone": "Next Milestone",
|
| 138 |
+
"goal_status_ahead": "Ahead",
|
| 139 |
+
"goal_status_on_track": "On Track",
|
| 140 |
+
"goal_status_behind": "Behind",
|
| 141 |
+
"lbl_latest_data": "Latest Data Found",
|
| 142 |
+
"lbl_consistency": "Consistency",
|
| 143 |
+
"lbl_metric": "Metric",
|
| 144 |
+
"lbl_interpreted_delta": "Interpreted Δ",
|
| 145 |
+
"performance_first_week_title": "This Week in Running",
|
| 146 |
+
"performance_first_week_body": "This is your first recorded week. You’re building your performance baseline. As you log more runs, your trends will become clearer.",
|
| 147 |
+
"performance_first_week_focus": "Keep logging your sessions to establish a consistent rhythm.",
|
| 148 |
+
"error_insufficient_data": "*Not enough history for period comparison (8 weeks required).*",
|
| 149 |
+
"goal_status_on_track": "On Track",
|
| 150 |
+
"goal_status_slightly_behind": "Slightly Behind",
|
| 151 |
+
"goal_status_behind": "Closing the Gap",
|
| 152 |
+
"goal_type_race": "Race/Event",
|
| 153 |
+
"goal_type_volume": "Weekly Volume",
|
| 154 |
+
"goal_type_pace": "Target Pace",
|
| 155 |
+
"unit_km": "km",
|
| 156 |
+
"unit_runs": "run(s)",
|
| 157 |
+
"unit_bpm": "bpm",
|
| 158 |
+
"unit_pts": "pts",
|
| 159 |
+
"unit_spkm": "s/km",
|
| 160 |
+
"na": "N/A",
|
| 161 |
+
"sec_structure": "Weekly Structure",
|
| 162 |
+
"lbl_weekday_runs": "Weekday Runs",
|
| 163 |
+
"lbl_long_run": "Long Run",
|
| 164 |
+
"lbl_structure_status": "Structure Status",
|
| 165 |
+
"lbl_km_remaining": "km remaining",
|
| 166 |
+
"lbl_km_remaining_subtext": "{val} km",
|
| 167 |
+
"coaching_advice": "Stay consistent this week to close the gap.",
|
| 168 |
+
"strong_week": "Strong Week",
|
| 169 |
+
"structured_but_light": "Structured but Incomplete Volume",
|
| 170 |
+
"rebuild_week": "Rebuild Week",
|
| 171 |
+
"reset_week": "Reset Week",
|
| 172 |
+
"status_pending": "Pending",
|
| 173 |
+
"status_completed": "Completed",
|
| 174 |
+
"current_week_label": "Current Week",
|
| 175 |
+
"last_week_label": "Last Week",
|
| 176 |
+
"lbl_snapshot_title": "Weekly Snapshot",
|
| 177 |
+
"no_data_available": "*No runs logged yet this week.*",
|
| 178 |
+
"home_no_goal_banner": "No active goal set. Set a goal to generate your weekly training plan.",
|
| 179 |
+
"performance_card_header": "This Week in Running",
|
| 180 |
+
"delta_vs_4w": "Δ vs 4W avg",
|
| 181 |
+
"focus_label": "Forward Focus",
|
| 182 |
+
"no_runs_message": "No runs recorded yet. Looking forward to your first activity!",
|
| 183 |
+
"btn_download_card": "Download Performance Card",
|
| 184 |
+
"next_run.title": "Next Recommended Run",
|
| 185 |
+
"next_run.focus": "Focus",
|
| 186 |
+
"next_run.session": "Suggested Session",
|
| 187 |
+
"next_run.why": "Why",
|
| 188 |
+
"next_run.upload_runs_prompt": "### 🏃 Next Run\n*Upload and analyze runs to see your next recommended session.*",
|
| 189 |
+
"next_run.set_goal_prompt": "### 🏃 Next Run\n*Set a goal in your Profile to receive personalized training recommendations.*",
|
| 190 |
+
"insights_risk_signal_heading": "Risk Signal",
|
| 191 |
+
"insights_key_observations_heading": "Key Observations",
|
| 192 |
+
"lbl_home_week_range": "Current Week",
|
| 193 |
+
"lbl_health_signal": "Health Signal",
|
| 194 |
+
"lbl_goal_trajectory": "Goal Trajectory",
|
| 195 |
+
"lbl_this_week": "This Week",
|
| 196 |
+
"lbl_local_folder_path": "Local folder path (Server mode)",
|
| 197 |
+
"btn_analyse_folder": "Analyse Folder",
|
| 198 |
+
"btn_consult_coach": "Consult your Coach",
|
| 199 |
+
"btn_starter_limiter": "Biggest limiter?",
|
| 200 |
+
"btn_starter_why": "Why this focus?",
|
| 201 |
+
"btn_starter_fatigue": "Fatigue risk?",
|
| 202 |
+
"acc_knowledge_base": "Knowledge Base & Templates",
|
| 203 |
+
"lbl_category": "Category",
|
| 204 |
+
"lbl_template": "Template",
|
| 205 |
+
"lbl_preview": "Preview",
|
| 206 |
+
"btn_insert_message": "Insert into Message Box",
|
| 207 |
+
"acc_adjustments": "Adjustments & Notes",
|
| 208 |
+
"lbl_pace_trend": "Pace Trend Analysis",
|
| 209 |
+
"lbl_hr_trend": "Heart Rate Analysis",
|
| 210 |
+
"lbl_gender_male": "Male",
|
| 211 |
+
"lbl_gender_female": "Female",
|
| 212 |
+
"lbl_gender_pns": "Prefer not to say",
|
| 213 |
+
"lbl_gender_none": "None",
|
| 214 |
+
"lbl_exp_beginner": "Beginner",
|
| 215 |
+
"lbl_exp_intermediate": "Intermediate",
|
| 216 |
+
"lbl_exp_advanced": "Advanced",
|
| 217 |
+
"lbl_focus_inline": "**Focus:**",
|
| 218 |
+
"lbl_total_of": "total of",
|
| 219 |
+
"lbl_more": "more",
|
| 220 |
+
"lbl_less": "less",
|
| 221 |
+
"lbl_faster": "faster",
|
| 222 |
+
"lbl_slower": "slower",
|
| 223 |
+
"lbl_that_is": "That's",
|
| 224 |
+
"lbl_than_avg": "than your recent average.",
|
| 225 |
+
"lbl_pace_was": "Your pace was",
|
| 226 |
+
"runner_positioning_title": "Runner Positioning",
|
| 227 |
+
"positioning_insufficient_data": "Insufficient data to determine positioning yet.",
|
| 228 |
+
"positioning_no_data": "No training data available yet.",
|
| 229 |
+
"positioning_context_title": "4-Week Context",
|
| 230 |
+
"positioning_state_title": "Training State",
|
| 231 |
+
"positioning_summary_title": "Summary",
|
| 232 |
+
"positioning_focus_title": "Forward Focus",
|
| 233 |
+
"positioning_current_state": "Training State",
|
| 234 |
+
"positioning_health_signal": "Health Signal",
|
| 235 |
+
"positioning_goal_trajectory": "Goal Trajectory",
|
| 236 |
+
"analysis_pending": "Analysis pending...",
|
| 237 |
+
"analysis_failed": "Analysis failed.",
|
| 238 |
+
# Health signals
|
| 239 |
+
"health_stable": "Stable",
|
| 240 |
+
"health_overreaching": "Under High Load",
|
| 241 |
+
"health_strain": "Strain Detected",
|
| 242 |
+
# Position status
|
| 243 |
+
"position_advancing": "Advancing",
|
| 244 |
+
"position_stable": "Stable",
|
| 245 |
+
"position_drifting": "Drifting",
|
| 246 |
+
# Goal trajectory
|
| 247 |
+
"trajectory_improving": "Progressing",
|
| 248 |
+
"trajectory_maintaining": "Holding steady",
|
| 249 |
+
"trajectory_declining": "Losing ground",
|
| 250 |
+
# Focus guidance
|
| 251 |
+
"focus_recovery": "Prioritize recovery and absorb recent load.",
|
| 252 |
+
"focus_build": "Continue building volume gradually.",
|
| 253 |
+
"focus_maintenance": "Maintain current structure and protect consistency.",
|
| 254 |
+
"focus_reduce_load": "Reduce load to prevent strain.",
|
| 255 |
+
# Positioning v1 Status Names
|
| 256 |
+
"positioning_status_constructive_adaptation": "Constructive Adaptation",
|
| 257 |
+
"positioning_status_productive_load": "Productive Load",
|
| 258 |
+
"positioning_status_strain": "Compensatory Strain",
|
| 259 |
+
"positioning_status_plateau": "Plateau",
|
| 260 |
+
# Positioning v1 Headlines
|
| 261 |
+
"positioning_headline_building": "You’re rebuilding consistency with limited activity this week.",
|
| 262 |
+
"positioning_headline_constructive_adaptation": "Your training is driving clear constructive adaptation.",
|
| 263 |
+
"positioning_headline_productive_load": "You protected your baseline this week but did not drive new adaptation.",
|
| 264 |
+
"positioning_headline_strain": "Current load is exceeding your capacity to absorb it safely.",
|
| 265 |
+
"positioning_headline_plateau": "Training has stabilized after a reduction in volume and frequency. Intensity may now be insufficient to stimulate further adaptation.",
|
| 266 |
+
# Rationales
|
| 267 |
+
"positioning_rationale_building": "Establishing initial training consistency. Focus on building a stable aerobic foundation before progressing load.",
|
| 268 |
+
"positioning_rationale_constructive_adaptation": "Load is increasing while maintaining consistency.",
|
| 269 |
+
"positioning_rationale_productive_load": "Maintaining load and protecting baseline.",
|
| 270 |
+
"positioning_rationale_strain": "Load increase is outpacing cardiovascular adaptation.",
|
| 271 |
+
"positioning_rationale_plateau": "Training has stabilized.",
|
| 272 |
+
# Forward Focus
|
| 273 |
+
"positioning_forward_focus_constructive_adaptation": "Maintain recent volume and allow adaptation to solidify.",
|
| 274 |
+
"positioning_forward_focus_productive_load": "Reinforce frequency consistency this week.",
|
| 275 |
+
"positioning_forward_focus_strain": "Prioritize active recovery and reduce non-essential session load.",
|
| 276 |
+
"positioning_forward_focus_plateau": "Introduce localized intensity spikes to challenge the baseline.",
|
| 277 |
+
"positioning_status_baseline_building": "Baseline Building",
|
| 278 |
+
"positioning_headline_baseline_building": "Your training baseline is being established.",
|
| 279 |
+
"positioning_rationale_building_building": "Initial week(s) of training detected. Collecting data to establish trends.",
|
| 280 |
+
"positioning_forward_focus_baseline_building": "Establish consistent easy runs this week.",
|
| 281 |
+
"positioning_trajectory_baseline_building": "Not enough history yet to assess training trends.",
|
| 282 |
+
"trajectory_no_goal": "No active goal",
|
| 283 |
+
"trajectory_establishing": "Establishing baseline",
|
| 284 |
+
# Trajectory
|
| 285 |
+
"positioning_trajectory_building": "Building: If this pattern continues, you are on track to exceed recent benchmarks.",
|
| 286 |
+
"positioning_trajectory_stable": "Stable: You are maintaining your level, ideal for long-term health.",
|
| 287 |
+
"positioning_trajectory_fatigue": "Fatigue Risk: If this pattern continues, performance may decline without recovery.",
|
| 288 |
+
"positioning_trajectory_plateau": "Training load has stabilized and adaptation appears to have plateaued.",
|
| 289 |
+
# Phases
|
| 290 |
+
"positioning_phase_title": "Training Phase",
|
| 291 |
+
"positioning_phase_base": "Base Phase",
|
| 292 |
+
"positioning_phase_build": "Build Phase",
|
| 293 |
+
"positioning_phase_peak": "Peak Phase",
|
| 294 |
+
"positioning_phase_recovery": "Recovery Phase",
|
| 295 |
+
"positioning_phase_plateau": "Plateau Phase",
|
| 296 |
+
# Coaching Narrative
|
| 297 |
+
"building_consistency": "You’ve started your week — focus on consistency.",
|
| 298 |
+
"so_far_this_week": "{val} so far this week",
|
| 299 |
+
"early_week_building": "You're early in the week, building your routine.",
|
| 300 |
+
"rebuild_phase": "Rebuilding consistency after a lower-volume week.",
|
| 301 |
+
"rebuilding_consistency_msg": "Rebuilding consistency after a lower-volume week.",
|
| 302 |
+
# Evidence
|
| 303 |
+
"positioning_evidence_title": "Evidence Breakdown",
|
| 304 |
+
"positioning_evidence_distance": "Distance",
|
| 305 |
+
"positioning_evidence_hr": "HR variance",
|
| 306 |
+
"positioning_evidence_frequency": "Frequency",
|
| 307 |
+
"positioning_evidence_consistency": "Consistency",
|
| 308 |
+
"positioning_not_enough_history": "Not enough training history to determine positioning yet.",
|
| 309 |
+
"positioning_evidence_pace_improved": "Pace improved",
|
| 310 |
+
"positioning_evidence_pace_worsened": "Pace worsened",
|
| 311 |
+
# Training Recommendations
|
| 312 |
+
"rec_title": "🎯 Training Recommendation",
|
| 313 |
+
"rec_lbl_focus": "**Focus:**",
|
| 314 |
+
"rec_lbl_session": "**Suggested Session:**",
|
| 315 |
+
"rec_lbl_explanation": "**Explanation:**",
|
| 316 |
+
"rec_focus_protect_recovery": "Protect Recovery",
|
| 317 |
+
"rec_focus_build_endurance": "Build Endurance",
|
| 318 |
+
"rec_focus_introduce_intensity": "Introduce Intensity",
|
| 319 |
+
"rec_focus_maintain_consistency": "Maintain Consistency",
|
| 320 |
+
"rec_session_easy_run": "Easy Recovery Run",
|
| 321 |
+
"rec_session_long_run": "Aerobic Long Run",
|
| 322 |
+
"rec_session_tempo_intervals": "Tempo Intervals",
|
| 323 |
+
"rec_session_steady_run": "20-minute easy run",
|
| 324 |
+
"rec_desc_protect_recovery": "Your indicators suggest accumulated fatigue. Prioritize low-intensity movement to allow cardiovascular and structural recovery.",
|
| 325 |
+
"rec_desc_build_endurance": "Stability in your recent markers shows you are ready to expand your aerobic base. Focus on extending duration at a comfortable effort.",
|
| 326 |
+
"rec_desc_introduce_intensity": "Your capacity for adaptation is high right now. Introducing controlled intensity will help break through current performance plateaus.",
|
| 327 |
+
"rec_desc_maintain_consistency": "Focus on protecting your current training rhythm. Consistency is your most powerful lever right now.",
|
| 328 |
+
},
|
| 329 |
+
"pt-BR": {
|
| 330 |
+
"welcome": "Bem-vindo ao Runner Intelligence. Envie sua primeira corrida e o sistema começará a construir sua base de treinamento e gerar insights.",
|
| 331 |
+
"title": "# 🏃 Runner Inteligência Agêntica (Experimental)",
|
| 332 |
+
"subtitle": '<span class="muted">Treinamento personalizado a partir dos seus dados.</span>',
|
| 333 |
+
"workflow": '<span class="muted"><b>Fluxo:</b> Enviar corridas ➔ Ver Insights ➔ Plano Semanal ➔ Chat com Treinador</span><br/><span class="muted"><i>Formatos: .fit, .fit.gz, .tcx, .tcx.gz, .gpx (max 50MB)</i></span><br/><span class="muted">Feedback: <a href="https://github.com/ideas-to-life/runner-agentic-intelligence/issues" target="_blank">GitHub Issues</a></span>',
|
| 334 |
+
"evolution_workflow": '<span class="muted">Acompanhe sua evolução. Revise seu check-in semanal e progresso vs mês passado.</span><br/><span class="muted">Envie novas corridas a qualquer momento — seu painel será atualizado automaticamente.</span><br/><span class="muted">Feedback: <a href="https://github.com/ideas-to-life/runner-agentic-intelligence/issues" target="_blank">GitHub Issues</a></span>',
|
| 335 |
+
"tab_analyse": "Analisar",
|
| 336 |
+
"tab_results": "Resultados",
|
| 337 |
+
"tab_coach": "Treinador",
|
| 338 |
+
"tab_home": "Início",
|
| 339 |
+
"tab_intelligence": "Inteligência",
|
| 340 |
+
"tab_positioning": "Posicionamento",
|
| 341 |
+
"lbl_your_week": "Sua Semana",
|
| 342 |
+
"lbl_coach_brief": "Resumo do Treinador",
|
| 343 |
+
"lbl_parsed": "Processadas",
|
| 344 |
+
"unit_weeks": "semanas",
|
| 345 |
+
"lbl_across": "ao longo de",
|
| 346 |
+
"lbl_current": "Atual",
|
| 347 |
+
"lbl_previous": "Passada",
|
| 348 |
+
"lbl_distance": "Distância",
|
| 349 |
+
"lbl_runs": "Corridas",
|
| 350 |
+
"lbl_avg_pace": "Ritmo Médio",
|
| 351 |
+
"lbl_consistency": "Consistência",
|
| 352 |
+
"goal_status_template": "Sua meta está **{val}**.",
|
| 353 |
+
"home_story_template": "Você correu **{count}** {unit} esta semana, em um {of} **{dist:.1f} km**.",
|
| 354 |
+
"lbl_current_state": "Estado Atual",
|
| 355 |
+
"lbl_key_insight": "Insight Principal",
|
| 356 |
+
"lbl_forward_focus": "Foco da Semana",
|
| 357 |
+
"lbl_details": "Detalhes",
|
| 358 |
+
"sec_evolution": "📈 Progresso vs Mês Passado",
|
| 359 |
+
"sec_checkin": "Check-In Semanal",
|
| 360 |
+
"lbl_goal_traj_inline": "**🎯 Trajetória:**",
|
| 361 |
+
"lbl_focus_inline": "**🎯 Foco:**",
|
| 362 |
+
"sec_snapshot": "Snapshot da Semana Atual",
|
| 363 |
+
"sec_upload": "🗂️ Envie seus dados de treino",
|
| 364 |
+
"upload_hints": '<div class="muted" style="margin-bottom: 12px; font-size: 0.9rem;"><ul><li>Melhores resultados: envie 4–12 corridas recentes (ou ~2–6 semanas).</li><li>Inclua dados de FC se tiver (ajuda com sinais de fadiga/risco).</li><li>Depois clique em ‘Analisar Dados’ para gerar Resultados e contexto.</li></ul></div>',
|
| 365 |
+
"upload_label": "Arraste e solte ou clique para enviar",
|
| 366 |
+
"btn_analyse": "🚀 Analisar Dados",
|
| 367 |
+
"btn_reset": "Reiniciar",
|
| 368 |
+
"sec_insights": "Insights do Treinador",
|
| 369 |
+
"lbl_risk": "Avaliação de Risco/Segurança",
|
| 370 |
+
"lbl_lever": "Alavanca Principal",
|
| 371 |
+
"sec_plan": "Plano de Treino Semanal",
|
| 372 |
+
"sec_charts": "Tendências e Análises",
|
| 373 |
+
"sec_chat": "Pergunte sobre seu treino",
|
| 374 |
+
"chat_indicator_no": "⚠️ *Envie e analise corridas para habilitar o contexto do chat.*",
|
| 375 |
+
"chat_indicator_yes": "✅ *Contexto do chat habilitado.*",
|
| 376 |
+
"chat_tip": "Dica: tente ‘Plotar meu ritmo’ ou ‘Mostrar gráfico de FC’ para visualizar seus dados.",
|
| 377 |
+
"chat_placeholder": "ex: Como foi meu ritmo semana passada?",
|
| 378 |
+
"btn_send": "Enviar",
|
| 379 |
+
"btn_clear": "Limpar Histórico",
|
| 380 |
+
"lang_label": "Language / Idioma",
|
| 381 |
+
"starter_limiter": "Maior limitador?",
|
| 382 |
+
"starter_why": "Por que este foco?",
|
| 383 |
+
"starter_fatigue": "Risco de fadiga?",
|
| 384 |
+
"starter_keep": "O que manter?",
|
| 385 |
+
"chat_starter_limiter": "Qual é o meu maior limitador de treino?",
|
| 386 |
+
"chat_starter_why": "Por que você sugeriu este foco de treinamento?",
|
| 387 |
+
"chat_starter_fatigue": "Você vê algum sinal de excesso de treino ou fadiga?",
|
| 388 |
+
"chat_starter_keep": "Quais aspectos do meu treino devo continuar fazendo?",
|
| 389 |
+
"analyse_progress": "Análise dos dados de treino em andamento...",
|
| 390 |
+
"insights_primary_lever_heading": "Alavanca Principal",
|
| 391 |
+
"insights_risk_signal_heading": "Sinal de Risco",
|
| 392 |
+
"insights_key_observations_heading": "Observações Principais",
|
| 393 |
+
"insights_evidence_label": "Evidências",
|
| 394 |
+
"insights_constraint_label": "Restrição",
|
| 395 |
+
"insights_no_primary_lever": "Nenhuma alavanca principal identificada.",
|
| 396 |
+
"insights_no_risk": "Nenhum risco elevado detectado.",
|
| 397 |
+
"insights_no_observations": "*Nenhuma observação fornecida.*",
|
| 398 |
+
"insights_analysis_failed": "*Falha na Análise*",
|
| 399 |
+
"insights_analysis_pending": "*Análise pendente...*",
|
| 400 |
+
"insights_raw_default": "Os insights brutos aparecerão aqui.",
|
| 401 |
+
"plan_week_summary_label": "Resumo da semana:",
|
| 402 |
+
"plan_focus_label": "Foco desta semana:",
|
| 403 |
+
"plan_pending": "*O plano aparecerá aqui após a análise.*",
|
| 404 |
+
"progress_extracting": "Extraindo características dos dados de corrida...",
|
| 405 |
+
"progress_generating": "Gerando insights e plano de treinamento...",
|
| 406 |
+
"pipeline_stage_1": "🏃 Enviando corridas",
|
| 407 |
+
"pipeline_stage_2": "📊 Construindo snapshot semanal",
|
| 408 |
+
"pipeline_stage_3": "📈 Analisando tendências de treino",
|
| 409 |
+
"pipeline_stage_4": "🧠 Gerando insights do treinador",
|
| 410 |
+
"pipeline_stage_5": "📋 Preparando recomendação de treino",
|
| 411 |
+
"init_stage_1": "⚙️ Inicializando serviços...",
|
| 412 |
+
"init_stage_2": "👤 Carregando perfil do corredor...",
|
| 413 |
+
"init_stage_3": "📂 Carregando histórico de treino...",
|
| 414 |
+
"init_stage_4": "🏠 Construindo dashboard...",
|
| 415 |
+
"week_stage_1": "📅 Alterando semana...",
|
| 416 |
+
"week_stage_2": "📊 Construindo snapshot semanal...",
|
| 417 |
+
"week_stage_3": "🏠 Atualizando dashboard...",
|
| 418 |
+
"chat_context_indicator": "🧠 **Usando seus insights de treinamento como contexto**",
|
| 419 |
+
"error_no_runs": "Nenhuma corrida para processar.",
|
| 420 |
+
"lbl_details": "Detalhes (Risco e Observações)",
|
| 421 |
+
"banner_title": "⚠️ Informações do Public Preview",
|
| 422 |
+
"banner_session": "**Dados da Sessão**: Todos os dados enviados e histórico de chat são temporários e apagados após a sessão.",
|
| 423 |
+
"banner_persistence_disabled": "🟡 **Modo Public Preview** — Nenhum dado é persistido. Todos os envios e métricas derivadas são temporários.",
|
| 424 |
+
"banner_persistence_enabled": "🟢 **Persistência de Histórico Ativada** — Snapshots semanais e tendências são armazenados localmente.",
|
| 425 |
+
"banner_medical": "**Aviso Médico**: Este sistema fornece insights de treinamento, NÃO aconselhamento médico. Consulte um profissional antes de iniciar qualquer plano de treino.",
|
| 426 |
+
"banner_full": "**Experiência Completa**: Para persistência total e armazenamento local, execute o app localmente a partir do [GitHub](https://github.com/avfranco/runner-agentic-intelligence).",
|
| 427 |
+
"insights_timestamp_label": "Insights gerados em",
|
| 428 |
+
"risk_level_low": "BAIXO",
|
| 429 |
+
"risk_level_medium": "MÉDIO",
|
| 430 |
+
"risk_level_high": "ALTO",
|
| 431 |
+
"chat_error": "Desculpe, estou com problemas para processar seu pedido agora.",
|
| 432 |
+
"plan_error": "Não foi possível gerar o plano neste momento.",
|
| 433 |
+
"tab_profile": "Perfil",
|
| 434 |
+
"sec_profile": "## Perfil do Corredor (Gêmeo Digital v1)",
|
| 435 |
+
"lbl_display_name": "Nome de Exibição",
|
| 436 |
+
"lbl_age": "Idade",
|
| 437 |
+
"lbl_experience": "Nível de Experiência",
|
| 438 |
+
"lbl_injury_notes": "Histórico de Lesões / Notas",
|
| 439 |
+
"btn_save_profile": "Salvar Perfil",
|
| 440 |
+
"profile_saved": "Perfil salvo com sucesso!",
|
| 441 |
+
"profile_save_error": "Erro ao salvar perfil.",
|
| 442 |
+
"lbl_baseline": "Distância Semanal Base (km)",
|
| 443 |
+
"lbl_gender": "Gênero",
|
| 444 |
+
"sec_goal": "Objetivo Atual",
|
| 445 |
+
"lbl_goal_type": "Tipo de Objetivo",
|
| 446 |
+
"lbl_target": "Valor Alvo",
|
| 447 |
+
"lbl_unit": "Unidade",
|
| 448 |
+
"lbl_date": "Data Alvo",
|
| 449 |
+
"btn_save_goal": "Definir Objetivo",
|
| 450 |
+
"goal_saved": "Objetivo atualizado!",
|
| 451 |
+
"lbl_progress": "Progresso",
|
| 452 |
+
"lbl_total_distance": "Distância",
|
| 453 |
+
"lbl_distance": "Distância",
|
| 454 |
+
"lbl_avg_pace": "Ritmo Médio",
|
| 455 |
+
"lbl_avg_hr": "FC Média",
|
| 456 |
+
"lbl_runs_count": "Corridas",
|
| 457 |
+
"goal_trajectory_title": "Trajetória da Meta",
|
| 458 |
+
"goal_status": "Status",
|
| 459 |
+
"goal_progress": "Progresso",
|
| 460 |
+
"goal_next_milestone": "Próximo Marco",
|
| 461 |
+
"goal_status_ahead": "Adiantado",
|
| 462 |
+
"goal_status_on_track": "No Caminho",
|
| 463 |
+
"goal_status_behind": "Atrasado",
|
| 464 |
+
"lbl_latest_data": "Últimos Dados Encontrados",
|
| 465 |
+
"lbl_consistency": "Consistência",
|
| 466 |
+
"lbl_metric": "Métrica",
|
| 467 |
+
"lbl_interpreted_delta": "Δ Interpretado",
|
| 468 |
+
"performance_first_week_title": "Esta Semana nas Corridas",
|
| 469 |
+
"performance_first_week_body": "Esta é a sua primeira semana registrada. Você está construindo sua base de performance. Conforme você registrar mais corridas, suas tendências ficarão mais claras.",
|
| 470 |
+
"performance_first_week_focus": "Continue registrando suas sessões para estabelecer um ritmo consistente.",
|
| 471 |
+
"error_insufficient_data": "*Histórico insuficiente para comparação (8 semanas necessárias).*",
|
| 472 |
+
"goal_status_on_track": "No Caminho",
|
| 473 |
+
"goal_status_slightly_behind": "Um Pouco Abaixo",
|
| 474 |
+
"goal_status_behind": "Em Progresso para a Meta",
|
| 475 |
+
"goal_type_race": "Prova/Evento",
|
| 476 |
+
"goal_type_volume": "Volume Semanal",
|
| 477 |
+
"goal_type_pace": "Ritmo Alvo",
|
| 478 |
+
"unit_km": "km",
|
| 479 |
+
"unit_runs": "corrida(s)",
|
| 480 |
+
"unit_bpm": "bpm",
|
| 481 |
+
"unit_pts": "pts",
|
| 482 |
+
"unit_spkm": "s/km",
|
| 483 |
+
"na": "N/A",
|
| 484 |
+
"sec_structure": "Estrutura Semanal",
|
| 485 |
+
"lbl_weekday_runs": "Corridas Semanais",
|
| 486 |
+
"lbl_long_run": "Longão",
|
| 487 |
+
"lbl_structure_status": "Status da Estrutura",
|
| 488 |
+
"lbl_km_remaining": "km restantes",
|
| 489 |
+
"lbl_km_remaining_subtext": "{val} km",
|
| 490 |
+
"coaching_advice": "Mantenha a consistência esta semana para fechar a meta.",
|
| 491 |
+
"strong_week": "Semana Forte",
|
| 492 |
+
"structured_but_light": "Estruturada, mas Leve",
|
| 493 |
+
"rebuild_week": "Reconstrução",
|
| 494 |
+
"reset_week": "Recuperação/Reset",
|
| 495 |
+
"status_pending": "Pendente",
|
| 496 |
+
"status_completed": "Concluído",
|
| 497 |
+
"current_week_label": "Semana Atual",
|
| 498 |
+
"last_week_label": "Semana Passada",
|
| 499 |
+
"lbl_snapshot_title": "Snapshot da Semana Atual",
|
| 500 |
+
"no_data_available": "*Nenhuma corrida registrada esta semana ainda.*",
|
| 501 |
+
"home_no_goal_banner": "Nenhum objetivo ativo definido. Defina um objetivo para gerar seu plano de treino semanal.",
|
| 502 |
+
"performance_card_header": "Esta Semana nas Corridas",
|
| 503 |
+
"delta_vs_4w": "Δ vs méd. 4s",
|
| 504 |
+
"focus_label": "Foco à Frente",
|
| 505 |
+
"no_runs_message": "Nenhuma corrida registrada ainda. No aguardo da sua primeira atividade!",
|
| 506 |
+
"btn_download_card": "Baixar Card de Performance",
|
| 507 |
+
"next_run.title": "Próxima Corrida Recomendada",
|
| 508 |
+
"next_run.focus": "Foco",
|
| 509 |
+
"next_run.session": "Sessão Sugerida",
|
| 510 |
+
"next_run.why": "Por quê",
|
| 511 |
+
"next_run.upload_runs_prompt": "### 🏃 Próxima Corrida\n*Envie e analise suas corridas para ver sua próxima sessão recomendada.*",
|
| 512 |
+
"next_run.set_goal_prompt": "### 🏃 Próxima Corrida\n*Defina um objetivo no seu Perfil para receber recomendações de treino personalizadas.*",
|
| 513 |
+
"insights_risk_signal_heading": "Sinal de Risco",
|
| 514 |
+
"insights_key_observations_heading": "Observações Principais",
|
| 515 |
+
"lbl_home_week_range": "Semana Atual",
|
| 516 |
+
"lbl_health_signal": "Sinal de Saúde",
|
| 517 |
+
"lbl_goal_trajectory": "Trajetória da Meta",
|
| 518 |
+
"lbl_this_week": "Esta Semana",
|
| 519 |
+
"lbl_local_folder_path": "Caminho da pasta local (Modo servidor)",
|
| 520 |
+
"btn_analyse_folder": "Analisar Pasta",
|
| 521 |
+
"btn_consult_coach": "Consultar seu Treinador",
|
| 522 |
+
"btn_starter_limiter": "Maior limitador?",
|
| 523 |
+
"btn_starter_why": "Por que este foco?",
|
| 524 |
+
"btn_starter_fatigue": "Risco de fadiga?",
|
| 525 |
+
"acc_knowledge_base": "Base de Conhecimento e Modelos",
|
| 526 |
+
"lbl_category": "Categoria",
|
| 527 |
+
"lbl_template": "Modelo",
|
| 528 |
+
"lbl_preview": "Pré-visualização",
|
| 529 |
+
"btn_insert_message": "Inserir na Caixa de Mensagem",
|
| 530 |
+
"acc_adjustments": "Ajustes e Notas",
|
| 531 |
+
"lbl_pace_trend": "Análise de Tendência de Ritmo",
|
| 532 |
+
"lbl_hr_trend": "Análise de Frequência Cardíaca",
|
| 533 |
+
"lbl_gender_male": "Masculino",
|
| 534 |
+
"lbl_gender_female": "Feminino",
|
| 535 |
+
"lbl_gender_pns": "Prefiro não dizer",
|
| 536 |
+
"lbl_gender_none": "Nenhum",
|
| 537 |
+
"lbl_exp_beginner": "Iniciante",
|
| 538 |
+
"lbl_exp_intermediate": "Intermediário",
|
| 539 |
+
"lbl_exp_advanced": "Avançado",
|
| 540 |
+
"lbl_focus_inline": "**Foco:**",
|
| 541 |
+
"lbl_total_of": "total de",
|
| 542 |
+
"lbl_more": "mais",
|
| 543 |
+
"lbl_less": "menos",
|
| 544 |
+
"lbl_faster": "mais rápido",
|
| 545 |
+
"lbl_slower": "mais lento",
|
| 546 |
+
"lbl_that_is": "Isso é",
|
| 547 |
+
"lbl_than_avg": "que sua média recente.",
|
| 548 |
+
"lbl_pace_was": "Seu ritmo foi",
|
| 549 |
+
"runner_positioning_title": "Posicionamento do Corredor",
|
| 550 |
+
"positioning_insufficient_data": "Dados insuficientes para determinar o posicionamento ainda.",
|
| 551 |
+
"positioning_no_data": "Nenhum dado de treino disponível ainda.",
|
| 552 |
+
"positioning_context_title": "Contexto de 4 Semanas",
|
| 553 |
+
"positioning_state_title": "Estado do Treino",
|
| 554 |
+
"positioning_summary_title": "Resumo",
|
| 555 |
+
"positioning_focus_title": "Foco Recomendado",
|
| 556 |
+
"positioning_current_state": "Estado do Treino",
|
| 557 |
+
"positioning_health_signal": "Sinal de Saúde",
|
| 558 |
+
"positioning_goal_trajectory": "Trajetória do Objetivo",
|
| 559 |
+
"positioning_trajectory": "Trajetória",
|
| 560 |
+
"positioning_insights": "Insights",
|
| 561 |
+
"analysis_pending": "Análise pendente...",
|
| 562 |
+
"analysis_failed": "Falha na análise.",
|
| 563 |
+
# Health signals
|
| 564 |
+
"health_stable": "Estável",
|
| 565 |
+
"health_overreaching": "Carga Elevada",
|
| 566 |
+
"health_strain": "Sinais de Sobrecarga",
|
| 567 |
+
# Position status
|
| 568 |
+
"position_advancing": "Avançando",
|
| 569 |
+
"position_stable": "Estável",
|
| 570 |
+
"position_drifting": "Perdendo Ritmo",
|
| 571 |
+
# Goal trajectory
|
| 572 |
+
"trajectory_improving": "Progredindo",
|
| 573 |
+
"trajectory_maintaining": "Mantendo",
|
| 574 |
+
"trajectory_declining": "Regredindo",
|
| 575 |
+
# Focus guidance
|
| 576 |
+
"focus_recovery": "Priorize a recuperação e absorva a carga recente.",
|
| 577 |
+
"focus_build": "Continue aumentando o volume gradualmente.",
|
| 578 |
+
"focus_maintenance": "Mantenha a estrutura atual e proteja a consistência.",
|
| 579 |
+
"focus_reduce_load": "Reduza a carga para evitar sobrecarga.",
|
| 580 |
+
# Positioning v1 Status Names
|
| 581 |
+
"positioning_status_constructive_adaptation": "Adaptação Construtiva",
|
| 582 |
+
"positioning_status_productive_load": "Carga Produtiva",
|
| 583 |
+
"positioning_status_strain": "Sobrecarga Compensatória",
|
| 584 |
+
"positioning_status_plateau": "Platô",
|
| 585 |
+
# Positioning v1 Headlines
|
| 586 |
+
"positioning_headline_building": "Você está reconstruindo consistência com quantidade de corridas limitada esta semana.",
|
| 587 |
+
"positioning_headline_constructive_adaptation": "Seu treino está gerando uma adaptação construtiva clara.",
|
| 588 |
+
"positioning_headline_productive_load": "Você protegeu sua base esta semana, mas não gerou nova adaptação.",
|
| 589 |
+
"positioning_headline_strain": "A carga atual está excedendo sua capacidade de absorção segura.",
|
| 590 |
+
"positioning_headline_plateau": "O treino estabilizou, mas a intensidade pode ser insuficiente para evolução.",
|
| 591 |
+
# Rationales
|
| 592 |
+
"positioning_rationale_building": "Construindo consistência inicial de treinamento. Foco em estabelecer uma base aeróbica estável antes de progredir a carga.",
|
| 593 |
+
"positioning_rationale_constructive_adaptation": "A carga está aumentando enquanto a consistência se mantém.",
|
| 594 |
+
"positioning_rationale_productive_load": "Mantendo a carga e protegendo a base.",
|
| 595 |
+
"positioning_rationale_strain": "O aumento da carga está superando a adaptação cardiovascular.",
|
| 596 |
+
"positioning_rationale_plateau": "O treinamento estabilizou.",
|
| 597 |
+
# Forward Focus
|
| 598 |
+
"positioning_forward_focus_constructive_adaptation": "Mantenha o volume recente e permita que a adaptação se consolide.",
|
| 599 |
+
"positioning_forward_focus_productive_load": "Reforce a consistência de frequência esta semana.",
|
| 600 |
+
"positioning_forward_focus_strain": "Priorize a recuperação ativa e reduza a carga de sessões não essenciais.",
|
| 601 |
+
"positioning_forward_focus_plateau": "Introduza picos de intensidade localizados para desafiar sua base.",
|
| 602 |
+
"positioning_status_baseline_building": "Construindo Base",
|
| 603 |
+
"positioning_headline_baseline_building": "Sua base de treinamento está sendo estabelecida.",
|
| 604 |
+
"positioning_rationale_building_building": "Semana(s) inicial(is) detectada(s). Coletando dados para estabelecer tendências.",
|
| 605 |
+
"positioning_forward_focus_baseline_building": "Estabeleça corridas leves consistentes esta semana.",
|
| 606 |
+
"positioning_trajectory_baseline_building": "Histórico insuficiente para avaliar tendências de treino.",
|
| 607 |
+
"trajectory_no_goal": "Sem objetivo ativo",
|
| 608 |
+
"trajectory_establishing": "Estabelecendo base",
|
| 609 |
+
# Trajectory
|
| 610 |
+
"positioning_trajectory_building": "Construção: Se este padrão continuar, você está no caminho para superar marcas recentes.",
|
| 611 |
+
"positioning_trajectory_stable": "Estável: Você está mantendo seu nível, ideal para a saúde de longo prazo.",
|
| 612 |
+
"positioning_trajectory_fatigue": "Risco de Fadiga: Se este padrão continuar, a performance pode cair sem recuperação.",
|
| 613 |
+
"positioning_trajectory_plateau": "A carga de treino estabilizou e a adaptação parece ter atingido um platô.",
|
| 614 |
+
# Phases
|
| 615 |
+
"positioning_phase_title": "Fase de Treino",
|
| 616 |
+
"positioning_phase_base": "Fase de Base",
|
| 617 |
+
"positioning_phase_build": "Fase de Construção",
|
| 618 |
+
"positioning_phase_peak": "Fase de Pico",
|
| 619 |
+
"positioning_phase_recovery": "Fase de Recuperação",
|
| 620 |
+
"positioning_phase_plateau": "Fase de Platô",
|
| 621 |
+
# Coaching Narrative
|
| 622 |
+
"building_consistency": "Você começou sua semana — foque na consistência.",
|
| 623 |
+
"so_far_this_week": "{val} até agora esta semana",
|
| 624 |
+
"early_week_building": "Você está no início da semana, construindo sua rotina.",
|
| 625 |
+
"rebuild_phase": "Reconstruindo a consistência após uma semana de menor volume.",
|
| 626 |
+
"rebuilding_consistency_msg": "Reconstruindo a consistência após uma semana de menor volume.",
|
| 627 |
+
# Evidence
|
| 628 |
+
"positioning_evidence_title": "Detalhamento de Evidências",
|
| 629 |
+
"positioning_evidence_distance": "Distância",
|
| 630 |
+
"positioning_evidence_hr": "Variância de FC",
|
| 631 |
+
"positioning_evidence_frequency": "Frequência",
|
| 632 |
+
"positioning_evidence_consistency": "Consistência",
|
| 633 |
+
"positioning_not_enough_history": "Histórico de treino insuficiente para determinar o posicionamento ainda.",
|
| 634 |
+
"positioning_evidence_pace_improved": "Pace melhorou",
|
| 635 |
+
"positioning_evidence_pace_worsened": "Pace piorou",
|
| 636 |
+
# Training Recommendations
|
| 637 |
+
"rec_title": "🎯 Recomendação de Treino",
|
| 638 |
+
"rec_lbl_focus": "**Foco:**",
|
| 639 |
+
"rec_lbl_session": "**Sessão Sugerida:**",
|
| 640 |
+
"rec_lbl_explanation": "**Explicação:**",
|
| 641 |
+
"rec_focus_protect_recovery": "Proteger Recuperação",
|
| 642 |
+
"rec_focus_build_endurance": "Construir Resistência",
|
| 643 |
+
"rec_focus_introduce_intensity": "Introduzir Intensidade",
|
| 644 |
+
"rec_focus_maintain_consistency": "Manter Consistência",
|
| 645 |
+
"rec_session_easy_run": "Corrida de Recuperação Leve",
|
| 646 |
+
"rec_session_long_run": "Longão Aeróbico",
|
| 647 |
+
"rec_session_tempo_intervals": "Intervalado de Tempo",
|
| 648 |
+
"rec_session_steady_run": "Corrida Leve de 20 minutos",
|
| 649 |
+
"rec_desc_protect_recovery": "Seus indicadores sugerem fadiga acumulada. Priorize movimentos de baixa intensidade para permitir a recuperação cardiovascular e estrutural.",
|
| 650 |
+
"rec_desc_build_endurance": "A estabilidade nos seus marcadores recentes mostra que você está pronto para expandir sua base aeróbica. Foque em estender a duração em um esforço confortável.",
|
| 651 |
+
"rec_desc_introduce_intensity": "Sua capacidade de adaptação está alta agora. Introduzir intensidade controlada ajudará a superar os patamares atuais de performance.",
|
| 652 |
+
"rec_desc_maintain_consistency": "Foque em proteger seu ritmo de treino atual. A consistência é sua alavanca mais poderosa agora.",
|
| 653 |
+
},
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
def get_text(key: str, language: str = "en") -> str:
    """Look up a localized UI string.

    Unknown languages fall back to the English table; keys missing from
    every table fall back to the key itself.
    """
    english = UI_TEXT["en"]
    table = UI_TEXT.get(language, english)
    return table.get(key, english.get(key, key))
|
src/_app/ui/coaching_helpers.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import date, timedelta
|
| 2 |
+
from typing import Optional, Dict, Any
|
| 3 |
+
from _app.presentation.ui_text import get_text
|
| 4 |
+
|
| 5 |
+
from unittest.mock import MagicMock

# Mapping from new DTO keys to the attribute names used by domain models.
# Hoisted to module level so it is not rebuilt on every _get_val call.
ATTR_MAP = {
    "week_start": "week_start_date",
    "num_runs": "run_count",
    "weekly_distance_km": "total_distance_km",
}


def _extract(target, k):
    """Return ``(value, found)`` for key/attribute ``k`` on ``target``.

    Supports dicts, MagicMocks and plain objects. For MagicMock, only
    attributes explicitly set (present in ``__dict__``) count as found,
    because plain ``getattr`` on a mock would fabricate a child mock.
    """
    if target is None:
        return None, False
    if isinstance(target, dict):
        if k in target:
            return target[k], True
        return None, False

    if isinstance(target, MagicMock):
        # Only return if specifically set (in __dict__)
        if k in target.__dict__:
            return getattr(target, k), True
        return None, False

    # Regular object
    if hasattr(target, k):
        return getattr(target, k), True
    return None, False


def _get_val(obj, key, default=None):
    """Read ``key`` from a dict, domain object or MagicMock.

    Lookup order:
    1. the exact key;
    2. the domain-model alias from ``ATTR_MAP``;
    3. a permissive fallback returning ``default``.
    """
    if obj is None:
        return default

    # 1. Try exact key
    val, found = _extract(obj, key)
    if found:
        return val

    # 2. Try mapped key
    mapped = ATTR_MAP.get(key)
    if mapped:
        val, found = _extract(obj, mapped)
        if found:
            return val

    # 3. Final fallback
    if isinstance(obj, dict):
        return obj.get(key, default)

    # For non-mocks, try getattr one last time (a MagicMock would fabricate
    # an attribute here, so mocks fall through to ``default`` instead).
    if not isinstance(obj, MagicMock):
        return getattr(obj, key, default)

    return default
|
| 58 |
+
|
| 59 |
+
def is_current_week(week_start) -> bool:
    """Return True when ``week_start`` is the Monday of the current week.

    Accepts a ``datetime.date`` or an ISO-format date string. Falsy or
    unparsable values yield False.
    """
    if not week_start:
        return False

    if isinstance(week_start, str):
        try:
            week_start = date.fromisoformat(week_start)
        except ValueError:
            # Narrowed from a bare ``except``: only a malformed ISO string
            # should be treated as "not the current week".
            return False

    today = date.today()
    # Monday of the current week
    current_monday = today - timedelta(days=today.weekday())
    return week_start == current_monday
|
| 75 |
+
|
| 76 |
+
def format_positioning_metrics(snapshot, language: str = "en") -> Dict[str, Any]:
    """
    Returns absolute metrics for the current week to avoid misleading percentages.
    Returns delta metrics for previous weeks.

    Args:
        snapshot: weekly snapshot (dict, domain model, or mock) read via
            ``_get_val`` so both DTO and domain attribute names work.
        language: UI language code passed through to ``get_text``.

    Returns:
        Dict with ``distance``, ``runs``, ``consistency_msg`` and ``mode``
        ("absolute" for the in-progress week, "delta" for completed weeks),
        or ``{}`` when no snapshot is given.
    """
    if not snapshot:
        return {}

    w_start = _get_val(snapshot, "week_start")
    is_current = is_current_week(w_start)

    if is_current:
        # Current (partial) week: show raw totals, since a percentage delta
        # against full past weeks would be misleading mid-week.
        dist_val = _get_val(snapshot, "weekly_distance_km", 0.0)
        dist_str = get_text("so_far_this_week", language).format(val=f"{dist_val:.1f} km")
        runs_val = _get_val(snapshot, "num_runs", 0)
        # Singular/plural handling: English strips the trailing "s" from the
        # lowercased "lbl_runs" label; Portuguese uses the fixed singular
        # "corrida". NOTE(review): assumes "lbl_runs" ends in "s" — confirm.
        runs_str = f"{runs_val} " + (get_text("unit_runs", language) if runs_val != 1 else get_text("lbl_runs", language).lower()[:-1] if language == "en" else "corrida")

        # Add "building consistency" message if only 1 run
        consistency_msg = ""
        if runs_val <= 1:
            consistency_msg = get_text("building_consistency", language)

        return {
            "distance": dist_str,
            "runs": runs_str,
            "consistency_msg": consistency_msg,
            "mode": "absolute"
        }

    # Completed week: report week-over-week deltas instead of totals.
    trend_val = _get_val(snapshot, "trend")
    if trend_val:
        dist_delta = _get_val(trend_val, "distance_delta_pct", 0.0)
        run_delta = _get_val(trend_val, "frequency_delta", 0)
    else:
        # Fallback to direct attributes for domain models (WeeklySnapshot doesn't have trend attached usually)
        dist_delta = _get_val(snapshot, "distance_delta_pct", 0.0)
        run_delta = _get_val(snapshot, "run_delta", 0)

    return {
        "distance": f"{dist_delta:+.1f}%",
        "runs": f"{run_delta:+d}",
        "consistency_msg": "",
        "mode": "delta"
    }
|
| 120 |
+
|
| 121 |
+
def get_baseline_aware_target(current_km: float, baseline: float) -> float:
    """Compute a safe weekly volume target relative to a historical baseline.

    After a deep drop (below 60% of baseline) target 60% of baseline; after
    a moderate drop (below 80%) target 80%; otherwise target the baseline
    itself. The result never goes below an 8.0 km safety floor.
    """
    floor_km = 8.0

    if not baseline or baseline <= 0:
        # No usable baseline: keep the runner's current volume, floored.
        return max(current_km, floor_km)

    ratio = current_km / baseline
    if ratio < 0.6:
        fraction = 0.6
    elif ratio < 0.8:
        fraction = 0.8
    else:
        fraction = 1.0

    return max(baseline * fraction, floor_km)
|
| 143 |
+
|
| 144 |
+
def interpret_week(snapshot, baseline: Optional[float], language: str = "en") -> Optional[str]:
    """Return a short narrative hint for the week, or None when none applies."""
    if not snapshot:
        return None

    if _get_val(snapshot, "num_runs", 0) <= 1:
        # One run (or none) so far: the week is still taking shape.
        return get_text("early_week_building", language)

    distance_km = _get_val(snapshot, "weekly_distance_km", 0.0)
    if baseline and distance_km < baseline * 0.6:
        # Volume well under the historical baseline: call it a rebuild week.
        return get_text("rebuild_phase", language)

    return None
|
src/agents/base.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABC, abstractmethod
|
| 2 |
+
from typing import Dict, Any, Union
|
| 3 |
+
from pydantic import BaseModel
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class BaseAgent(ABC):
    """Common interface for all agents: a single ``run`` entry point."""

    @abstractmethod
    def run(self, *args, **kwargs) -> Union[Dict[str, Any], BaseModel]:
        """Execute the agent and return its result payload."""
        ...
|
src/agents/chat_agent.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import config
|
| 3 |
+
import time
|
| 4 |
+
from typing import Dict, Any, List, Optional
|
| 5 |
+
from .base import BaseAgent
|
| 6 |
+
from llm.base import LLMClient
|
| 7 |
+
from observability import logger as obs_logger
|
| 8 |
+
from observability import components as obs_components
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ChatAgent(BaseAgent):
    """
    Agent responsible for handling conversational interactions with the user.
    It can answer questions about the run data, insights, and plan, and can also
    delegate to other agents (like VisualizationAgent) for specific tasks.
    """

    def __init__(self, llm_client: LLMClient):
        self.llm_client = llm_client
        self.context = {}
        # Default to the English instruction until a request picks a language.
        self.instruction = self._load_instruction("en")

    def _load_instruction(self, language: str = "en") -> str:
        """Load the system instruction prompt for ``language``.

        Falls back to the English prompt file, then to a hard-coded default,
        so a missing prompt file never breaks the chat.
        """
        try:
            # Resolve path relative to this file
            base_path = Path(__file__).parent.parent / "prompts"
            filename = f"chat_{language}.txt"
            file_path = base_path / filename

            if not file_path.exists():
                logger.warning(f"Prompt file not found: {file_path}. Falling back to English.")
                file_path = base_path / "chat_en.txt"

            if not file_path.exists():
                logger.error("English prompt file missing!")
                return "You are a helpful running coach assistant."

            return file_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.error(f"Error loading prompt for language {language}: {e}")
            return "You are a helpful running coach assistant."

    async def run(self, message: str, context: Dict[str, Any], language: str = "en") -> str:
        """
        Process a user message with the given context.

        Builds a language-aware prompt from the context (features, insights,
        plan, summary, auto-injected historical insights), delegates to the
        LLM client, and returns a localized fallback message on failure.

        NOTE: the original placed this docstring after the first statement,
        turning it into a no-op string expression; it is now a real docstring.
        """
        # Load language-specific instruction
        self.instruction = self._load_instruction(language)
        self.context = context

        # Construct a prompt with context
        is_pt = language == "pt-BR"
        if is_pt:
            prompt = f"""
            Contexto:
            Características: {context.get('features', 'Não disponível')}
            Insights: {context.get('insights', 'Não disponível')}
            Plano: {context.get('plan', 'Não disponível')}
            Resumo: {context.get('summary', 'Não disponível')}

            ### Contexto de Performance Histórica (Injetado automaticamente)
            {self._format_auto_insights(context.get('auto_injected_insights', []), language=language)}

            Mensagem do Usuário: {message}

            Resposta:
            """
        else:
            prompt = f"""
            Context:
            Features: {context.get('features', 'Not available')}
            Insights: {context.get('insights', 'Not available')}
            Plan: {context.get('plan', 'Not available')}
            Summary: {context.get('summary', 'Not available')}

            ### Historical Performance Context (Auto-injected)
            {self._format_auto_insights(context.get('auto_injected_insights', []))}

            User Message: {message}

            Answer:
            """

        with obs_logger.start_span("chat_agent.run", obs_components.AGENT):
            start_time = time.time()
            try:
                response = await self.llm_client.generate(
                    prompt, instruction=self.instruction, name="chat_agent"
                )
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "info",
                    "Chat response generated",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "message_length": len(message),
                        "response_length": len(str(response)),
                    },
                )
                return str(response)
            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "error",
                    f"Chat agent failed: {e}",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "error_type": type(e).__name__,
                        "error_message": str(e),
                    },
                )
                logger.error(f"Chat agent failed: {e}")
                return (
                    "Desculpe, estou com problemas para processar seu pedido agora."
                    if language == "pt-BR"
                    else "I'm sorry, I'm having trouble processing your request right now."
                )

    def _format_auto_insights(self, insights: List[Dict[str, Any]], language: str = "en") -> str:
        """Flatten stored insight records into a dated bullet list for the prompt."""
        is_pt = language == "pt-BR"
        if not insights:
            return (
                "Nenhum insight anterior encontrado no histórico."
                if is_pt
                else "No previous insights found in history."
            )

        lines = []
        unknown_date = "Data Desconhecida" if is_pt else "Unknown Date"
        for item in insights:
            date_str = item.get("date", unknown_date)
            # Insights are stored as a dict of message strings
            msgs = item.get("insights", {})
            if isinstance(msgs, dict):
                parts = []
                for k, v in msgs.items():
                    if isinstance(v, dict):
                        m = v.get("message")
                        if m:
                            parts.append(m)
                    elif isinstance(v, list):
                        for sub_v in v:
                            if isinstance(sub_v, dict):
                                m = sub_v.get("message")
                                if m:
                                    parts.append(m)
                    elif isinstance(v, str):
                        parts.append(v)
                content = " | ".join(parts)
            else:
                content = str(msgs)
            lines.append(f"- [{date_str}]: {content}")

        return "\n".join(lines)
|
src/agents/feature_engineering/agent.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import logging
|
| 3 |
+
from observability import logger as obs_logger
|
| 4 |
+
from observability import components as obs_components
|
| 5 |
+
from ..base import BaseAgent
|
| 6 |
+
from typing import List, Dict, Any
|
| 7 |
+
|
| 8 |
+
from ingestion.features import running_features
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class FeatureEngineeringAgent(BaseAgent):
    """
    Input: list of runs as returned by parser
    Output: a features dict with:
    - per_run summaries (pace_min_per_km, avg_hr)
    - time series arrays for charts
    - weekly aggregates (mileage)
    - consistency score (0-100)
    """

    def run(self, runs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Compute summary and detailed per-run features for ``runs``.

        Re-raises any failure from the underlying feature computation after
        logging it. ``weekly_stats`` and ``trends`` are kept as empty dicts
        for backward compatibility (in-memory aggregation is deprecated).
        """
        with obs_logger.start_span("feature_engineering_agent.run", obs_components.AGENT):
            # Lazy import to keep module import cheap and avoid cycles.
            # (Removed the unused aggregate_runs_by_week / compute_trends
            # imports — the in-memory weekly aggregation path is deprecated.)
            from ingestion.features import compute_features_batch

            try:
                # 1. Basic summary and charts
                summary = running_features(runs)

                # 2. Detailed per-run features
                detailed_features = compute_features_batch(runs)

                return {
                    "summary": summary,
                    "detailed_features": detailed_features,
                    "weekly_stats": {},  # Deprecated in-memory
                    "trends": {},  # Deprecated in-memory
                }
            except Exception as e:
                logger.error(f"Feature engineering failed: {e}")
                raise
|
src/agents/file_integrity/agent.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class FileIntegrityAgent:
    """Performs a minimal sanity check on the uploaded data sources."""

    def run(self, strava_data, apple_data):
        """Return a human-readable integrity status for the given inputs."""
        has_any_data = bool(strava_data) or bool(apple_data)
        if has_any_data:
            return "Files loaded successfully. Basic integrity OK."
        return "No valid files provided."
|
src/agents/guardrail_agent.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
from observability import logger as obs_logger
|
| 5 |
+
from observability import components as obs_components
|
| 6 |
+
from .base import BaseAgent
|
| 7 |
+
from domain.training.agent_models import RiskAssessment, WeeklySummary
|
| 8 |
+
from domain.runner.profile import RunnerProfile
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class InjuryFatigueGuardrailAgent(BaseAgent):
|
| 14 |
+
"""
|
| 15 |
+
Heuristic-based agent that assesses the risk of injury or fatigue.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def assess_risk(
|
| 19 |
+
self,
|
| 20 |
+
features: WeeklySummary,
|
| 21 |
+
pain_reported: bool = False,
|
| 22 |
+
profile: Optional[RunnerProfile] = None,
|
| 23 |
+
) -> RiskAssessment:
|
| 24 |
+
"""
|
| 25 |
+
Calculates risk level and reasons based on running features.
|
| 26 |
+
"""
|
| 27 |
+
risk_level = "LOW"
|
| 28 |
+
reasons = []
|
| 29 |
+
adjustments = []
|
| 30 |
+
metrics = {}
|
| 31 |
+
|
| 32 |
+
# 1. Mileage Spike Heuristic
|
| 33 |
+
# summary often contains 'weekly_km'
|
| 34 |
+
weekly_km = features.weekly_km
|
| 35 |
+
if weekly_km:
|
| 36 |
+
weeks = sorted(weekly_km.items(), reverse=True)
|
| 37 |
+
if len(weeks) >= 2:
|
| 38 |
+
current_week_val = weeks[0][1]
|
| 39 |
+
prev_week_val = weeks[1][1]
|
| 40 |
+
metrics["current_week_km"] = current_week_val
|
| 41 |
+
metrics["prev_week_km"] = prev_week_val
|
| 42 |
+
|
| 43 |
+
if prev_week_val > 0:
|
| 44 |
+
increase_pct = (current_week_val - prev_week_val) / prev_week_val
|
| 45 |
+
metrics["wow_increase_pct"] = increase_pct
|
| 46 |
+
|
| 47 |
+
if increase_pct > 0.5:
|
| 48 |
+
risk_level = "HIGH"
|
| 49 |
+
reasons.append(f"Major mileage spike: {increase_pct:.1%} increase Wow.")
|
| 50 |
+
adjustments.append("Reduce volume significantly; include extra rest day.")
|
| 51 |
+
elif increase_pct > 0.3:
|
| 52 |
+
if risk_level != "HIGH":
|
| 53 |
+
risk_level = "MEDIUM"
|
| 54 |
+
reasons.append(
|
| 55 |
+
f"Significant mileage increase: {increase_pct:.1%} increase WoW."
|
| 56 |
+
)
|
| 57 |
+
adjustments.append("Cap intensity; avoid two hard days in a row.")
|
| 58 |
+
|
| 59 |
+
# 1.5. Baseline Mileage Spike Heuristic
|
| 60 |
+
if (
|
| 61 |
+
profile
|
| 62 |
+
and isinstance(profile.baseline_weekly_km, (int, float))
|
| 63 |
+
and profile.baseline_weekly_km > 0
|
| 64 |
+
):
|
| 65 |
+
current_week_val = 0.0
|
| 66 |
+
if weekly_km:
|
| 67 |
+
weeks = sorted(weekly_km.items(), reverse=True)
|
| 68 |
+
if weeks:
|
| 69 |
+
current_week_val = weeks[0][1]
|
| 70 |
+
|
| 71 |
+
if current_week_val > 0:
|
| 72 |
+
baseline_increase_pct = (
|
| 73 |
+
current_week_val - profile.baseline_weekly_km
|
| 74 |
+
) / profile.baseline_weekly_km
|
| 75 |
+
metrics["baseline_increase_pct"] = baseline_increase_pct
|
| 76 |
+
|
| 77 |
+
if baseline_increase_pct > 0.6:
|
| 78 |
+
if risk_level != "HIGH":
|
| 79 |
+
risk_level = "HIGH"
|
| 80 |
+
reasons.append(
|
| 81 |
+
f"Major spike vs your baseline: {baseline_increase_pct:.1%} above baseline of {profile.baseline_weekly_km}km."
|
| 82 |
+
)
|
| 83 |
+
if "Reduce volume" not in " ".join(adjustments):
|
| 84 |
+
adjustments.append("Reduce volume to closer to your baseline.")
|
| 85 |
+
elif baseline_increase_pct > 0.4:
|
| 86 |
+
if risk_level == "LOW":
|
| 87 |
+
risk_level = "MEDIUM"
|
| 88 |
+
reasons.append(
|
| 89 |
+
f"Significant increase vs your baseline: {baseline_increase_pct:.1%} above baseline."
|
| 90 |
+
)
|
| 91 |
+
if "Cap intensity" not in " ".join(adjustments):
|
| 92 |
+
adjustments.append(
|
| 93 |
+
"Be cautious with this increase; ensure adequate recovery."
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
# 2. Consistency / Monotony Heuristic
|
| 97 |
+
# Feature Engineering agent provides consistency_score (0-100)
|
| 98 |
+
consistency = features.consistency_score
|
| 99 |
+
metrics["consistency_score"] = consistency
|
| 100 |
+
if consistency < 30:
|
| 101 |
+
if risk_level == "LOW":
|
| 102 |
+
risk_level = "MEDIUM"
|
| 103 |
+
reasons.append(
|
| 104 |
+
f"Low consistency score ({consistency}/100) increases injury risk during ramp-up."
|
| 105 |
+
)
|
| 106 |
+
adjustments.append("Build base gradually before adding intensity.")
|
| 107 |
+
|
| 108 |
+
# 3. Pain Reported Flag (from User or scenario)
|
| 109 |
+
metrics["pain_reported"] = pain_reported
|
| 110 |
+
if pain_reported:
|
| 111 |
+
risk_level = "HIGH"
|
| 112 |
+
reasons.append("User reported pain or discomfort.")
|
| 113 |
+
adjustments.append("Switch all remaining sessions this week to rest or cross-training.")
|
| 114 |
+
|
| 115 |
+
# 4. Fatigue markers (placeholder - heart rate drift trend if provided)
|
| 116 |
+
# In this PoC, we might have pace_trend from trends but it's simple.
|
| 117 |
+
|
| 118 |
+
return RiskAssessment(
|
| 119 |
+
risk_level=risk_level,
|
| 120 |
+
reasons=reasons,
|
| 121 |
+
recommended_adjustments=adjustments,
|
| 122 |
+
metrics_used=metrics,
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
def run(
    self,
    features: WeeklySummary,
    pain_reported: bool = False,
    profile: Optional[RunnerProfile] = None,
) -> RiskAssessment:
    """Assess injury/fatigue risk for the given weekly features.

    Thin observability wrapper: opens a tracing span and delegates the
    actual heuristics to ``assess_risk``.

    Args:
        features: Weekly summary produced by the feature-engineering stage.
        pain_reported: Whether the user (or scenario) reported pain/discomfort.
        profile: Optional runner profile used for baseline comparisons.

    Returns:
        The RiskAssessment computed by ``assess_risk``.
    """
    span = obs_logger.start_span("guardrail_agent.run", obs_components.AGENT)
    with span:
        return self.assess_risk(features, pain_reported, profile=profile)
|
src/agents/insights/agent.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, Union, Optional
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
import logging
|
| 4 |
+
import json
|
| 5 |
+
import time
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from observability import logger as obs_logger
|
| 8 |
+
from observability import components as obs_components
|
| 9 |
+
|
| 10 |
+
from domain.training.agent_models import WeeklyTrends, Insight, InsightsOutput, RiskAssessment
|
| 11 |
+
from domain.training.run import Run
|
| 12 |
+
from domain.runner.profile import RunnerProfile
|
| 13 |
+
from domain.runner.goal import Goal
|
| 14 |
+
from llm.base import LLMClient
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class InsightsAgent:
    """
    Generate actionable insights for a runner using an LLM via the provided LLMClient.

    The agent loads a language-specific system instruction from disk, builds a
    data-rich user prompt from the latest run, weekly trends and (optionally)
    the runner profile / active goal, then asks the LLM for a structured
    ``InsightsOutput``.
    """

    def __init__(self, llm_client: LLMClient):
        # Client is injected so model selection stays an orchestrator concern.
        self.llm_client = llm_client
        # Default to English; run() reloads the instruction per request.
        self.instruction = self._load_instruction("en")

    def _load_instruction(self, language: str = "en") -> str:
        """Load the system instruction for ``language``.

        Falls back to the English prompt file, then to a minimal hard-coded
        instruction if no prompt file can be read.
        """
        try:
            # Resolve path relative to this file: src/agents/insights/../../prompts/
            base_path = Path(__file__).parent.parent.parent / "prompts"
            filename = f"insights_{language}.txt"
            file_path = base_path / filename

            if not file_path.exists():
                logger.warning(f"Prompt file not found: {file_path}. Falling back to English.")
                file_path = base_path / "insights_en.txt"

            if not file_path.exists():
                # Ultimate fallback if even English file is missing
                logger.error("English prompt file missing!")
                return "You are an expert running coach. Generate insights in JSON."

            return file_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.error(f"Error loading prompt for language {language}: {e}")
            return "You are an expert running coach. Generate insights in JSON."

    async def run(
        self,
        run_features: Run,
        weekly_trends: WeeklyTrends,
        risk_level: str = "LOW",
        language: str = "en",
        profile: Optional[RunnerProfile] = None,
        goal: Optional[Goal] = None,
    ) -> Dict[str, Any]:
        """Generate insights for one run.

        Returns ``InsightsOutput.model_dump()`` on success, or a
        language-appropriate static error payload on any failure.
        """
        with obs_logger.start_span("insights_agent.run", obs_components.AGENT):
            start_time = time.time()

            # Load language-specific instruction
            self.instruction = self._load_instruction(language)

            # Construct Prompt
            prompt = self._construct_prompt(
                run_features,
                weekly_trends,
                risk_level,
                language=language,
                profile=profile,
                goal=goal,
            )

            try:
                # Call LLM via Client
                with obs_logger.start_span("insights_agent.llm", obs_components.AGENT):
                    insights_output = await self.llm_client.generate(
                        prompt, instruction=self.instruction, schema=InsightsOutput, name="insights_agent"
                    )

                if isinstance(insights_output, InsightsOutput):
                    result = insights_output.model_dump()
                else:
                    # Handle unexpected response type
                    logger.error(f"Unexpected response type from LLM: {type(insights_output)}")
                    result = self._fallback_error(language)

                return result

            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "error",
                    f"Failed to generate insights: {e}",
                    event="error",
                    component=obs_components.AGENT,
                    duration_ms=duration_ms,
                )
                logger.error(f"Failed to generate insights with LLM: {e}", exc_info=True)
                return self._fallback_error(language)

    def _fallback_error(self, language: str = "en") -> Dict[str, Any]:
        """Return a static, language-appropriate error payload.

        Mirrors the top-level keys of ``InsightsOutput`` so downstream
        consumers never break on a generation failure.
        """
        is_pt = language == "pt-BR"
        if is_pt:
            return {
                "error": "Não foi possível gerar os insights neste momento.",
                "primary_lever": {"message": "Não foi possível gerar insights."},
                "risk_signal": {"message": "Avaliação de risco indisponível."},
                "key_observations": [],
                "summary": {"message": "Falha na geração de insights."},
            }
        return {
            "error": "Could not generate insights at this time.",
            "primary_lever": {"message": "Could not generate insights."},
            "risk_signal": {"message": "Risk assessment unavailable."},
            "key_observations": [],
            "summary": {"message": "Insights generation failed."},
        }

    def _construct_prompt(
        self,
        run: Run,
        trends: WeeklyTrends,
        risk_level: str,
        language: str = "en",
        profile: Optional[RunnerProfile] = None,
        goal: Optional[Goal] = None,
    ) -> str:
        """Build the user prompt (pt-BR or English) from run data, trends and
        optional profile/goal context."""
        is_pt = language == "pt-BR"
        profile_context = ""
        if profile:
            profile_context = "\n**Runner Profile Context:**\n"
            if profile.runner_display_name:
                profile_context += f"- Display Name: {profile.runner_display_name}\n"
            if profile.age:
                profile_context += f"- Age: {profile.age}\n"
            if profile.experience_level:
                profile_context += f"- Experience Level: {profile.experience_level}\n"
            if profile.baseline_weekly_km:
                profile_context += f"- Baseline Weekly KM: {profile.baseline_weekly_km}\n"
            if profile.injury_history_notes:
                profile_context += f"- Injury Notes: {profile.injury_history_notes}\n"

        if goal:
            goal_type_label = goal.type.replace("_", " ").title()
            date_str = goal.target_date.strftime("%Y-%m-%d") if goal.target_date else "N/A"
            profile_context += "\n**Current Active Goal:**\n"
            profile_context += f"- Type: {goal_type_label}\n"
            profile_context += f"- Target: {goal.target_value} {goal.unit}\n"
            profile_context += f"- Target Date: {date_str}\n"

        if is_pt:
            return f"""
Analise os seguintes dados de corrida e tendências semanais.
Observe que nossa avaliação de risco heurística é atualmente: **{risk_level}**.
{profile_context}

**Dados da Corrida:**
- Distância: {run.total_distance_m} metros
- Duração: {run.total_duration_s} segundos
- Ritmo Médio: {run.avg_pace_min_per_km} min/km
- FC Média: {run.avg_hr_bpm} bpm
- FC Máxima: {run.max_hr_bpm} bpm
- Ganho de Elevação: {run.elevation_gain_m} metros

**Tendências Semanais:**
- Tendência de Ritmo: {trends.pace_trend_s_per_km} s/km (negativo significa mais rápido)
- Tendência de Distância: {trends.distance_trend_m} metros
- Média de Corridas/Semana: {trends.avg_runs_per_week}
- Monotonia: {trends.run_monotony}

Identifique:
1. A 'Alavanca Principal' (mudança de treino com maior prioridade).
2. 'Sinais de Risco' significativos (alinhados com nossa heurística de {risk_level}).
3. Duas 'Observações Principais' que importam para a tomada de decisão.
4. Um 'Resumo do Treinador' (uma frase final de encorajamento ou instrução).

IMPORTANTE: Todas as mensagens, evidências e justificativas DEVEM estar em Português do Brasil (pt-BR).
"""

        # Fix: the English template previously omitted {profile_context}, so the
        # runner profile and active goal (built above) were silently dropped for
        # English users while the pt-BR template included them. Mirror pt-BR.
        return f"""
Analyze the following run data and weekly trends.
Note that our heuristic risk assessment is currently: **{risk_level}**.
{profile_context}

**Run Data:**
- Distance: {run.total_distance_m} meters
- Duration: {run.total_duration_s} seconds
- Avg Pace: {run.avg_pace_min_per_km} min/km
- Avg HR: {run.avg_hr_bpm} bpm
- Max HR: {run.max_hr_bpm} bpm
- Elevation Gain: {run.elevation_gain_m} meters

**Weekly Trends:**
- Pace Trend: {trends.pace_trend_s_per_km} s/km (negative means faster)
- Distance Trend: {trends.distance_trend_m} meters
- Avg Runs/Week: {trends.avg_runs_per_week}
- Monotony: {trends.run_monotony}

Identify:
1. The 'Primary Lever' (top priority training change).
2. Significant 'Risk Signals' (aligned with our {risk_level} heuristic).
3. Two 'Key Observations' that matter for decision making.
4. A 'Coaching Summary' (one-sentence final encouraging or instructional takeaway).
"""
|
src/agents/orchestrator.py
ADDED
|
@@ -0,0 +1,1170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import uuid
|
| 3 |
+
import time
|
| 4 |
+
from datetime import date, datetime, timedelta, timezone
|
| 5 |
+
from ingestion.weekly_features import week_start
|
| 6 |
+
from typing import List, Dict, Any, Optional, Set, Tuple
|
| 7 |
+
from observability import logger as obs_logger
|
| 8 |
+
from observability import components as obs_components
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# LLM imports
|
| 12 |
+
from llm import get_llm_client
|
| 13 |
+
from llm.model_registry import select_model_for_agent
|
| 14 |
+
|
| 15 |
+
# Local Agent Imports
|
| 16 |
+
from .feature_engineering.agent import FeatureEngineeringAgent
|
| 17 |
+
from .insights.agent import InsightsAgent
|
| 18 |
+
from .plan.agent import PlanAgent
|
| 19 |
+
from .visualization.agent import VisualizationAgent
|
| 20 |
+
from .chat_agent import ChatAgent
|
| 21 |
+
from .guardrail_agent import InjuryFatigueGuardrailAgent
|
| 22 |
+
|
| 23 |
+
from tools.stores import InMemoryFeatureStore, InMemoryPlanStore
|
| 24 |
+
from tools.viz_executor import TemplateVisualizationExecutor
|
| 25 |
+
|
| 26 |
+
from router.router import async_route
|
| 27 |
+
from router.models import RouteDecision
|
| 28 |
+
|
| 29 |
+
# New domain imports
|
| 30 |
+
from domain.runner.profile import RunnerProfile
|
| 31 |
+
from domain.training.run import Run
|
| 32 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 33 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 34 |
+
from domain.training.agent_models import AnalysisRecord, WeeklyTrends, RiskAssessment
|
| 35 |
+
from domain.goals.goal_trajectory import GoalTrajectory
|
| 36 |
+
from core.intelligence.runner_intelligence_snapshot import RunnerIntelligenceSnapshot
|
| 37 |
+
|
| 38 |
+
# New persistence imports
|
| 39 |
+
from persistence.db import Database
|
| 40 |
+
from persistence.repositories.runner_repo import RunnerRepository
|
| 41 |
+
from persistence.repositories.run_repo import RunRepository
|
| 42 |
+
from persistence.repositories.weekly_repo import WeeklySnapshotRepository
|
| 43 |
+
from persistence.repositories.trend_repo import TrendSnapshotRepository
|
| 44 |
+
from persistence.repositories.analysis_repo import AnalysisRepository
|
| 45 |
+
from persistence.repositories.goal_repo import SqlGoalRepository
|
| 46 |
+
from persistence.repositories.null_goal_repo import NullGoalRepository
|
| 47 |
+
from persistence.repositories.planned_session_repository import PlannedSessionRepository
|
| 48 |
+
|
| 49 |
+
# New engine imports
|
| 50 |
+
from engines.trend.trend_engine import TrendEngine
|
| 51 |
+
|
| 52 |
+
# New stateless builders
|
| 53 |
+
from domain.training.weekly_snapshot_builder import WeeklySnapshotBuilder
|
| 54 |
+
from domain.training.weekly_trend_builder import WeeklyTrendBuilder
|
| 55 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 56 |
+
|
| 57 |
+
# New service imports
|
| 58 |
+
from services.snapshot_service import SnapshotService
|
| 59 |
+
from services.brief_service import BriefService
|
| 60 |
+
from services.goal_service import GoalService
|
| 61 |
+
from services.structure_service import StructureService
|
| 62 |
+
from services.run_persistence_service import RunPersistenceService
|
| 63 |
+
|
| 64 |
+
from application.runner_positioning_service import RunnerPositioningService
|
| 65 |
+
from domain.runner_positioning import RunnerPositioning
|
| 66 |
+
from application.positioning_service import WeeklyPositioning, PositioningEngine
|
| 67 |
+
from application.recommendation_service import RecommendationService
|
| 68 |
+
from services.guardrail_arbitration_service import GuardrailArbitrationService
|
| 69 |
+
from services.positioning_change_service import PositioningChangeService
|
| 70 |
+
from services.goal_progress_service import GoalProgressService
|
| 71 |
+
from application.goal_trajectory_service import GoalTrajectoryService
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
import config
|
| 75 |
+
|
| 76 |
+
# Pipeline imports
|
| 77 |
+
from core.pipeline.pipeline import RunnerPipeline
|
| 78 |
+
from core.pipeline.context import PipelineContext
|
| 79 |
+
from pipeline_steps.feature_engineering_step import FeatureEngineeringStep
|
| 80 |
+
from pipeline_steps.persist_runs_step import PersistRunsStep
|
| 81 |
+
from pipeline_steps.structure_step import StructureStep
|
| 82 |
+
from pipeline_steps.snapshot_step import SnapshotStep
|
| 83 |
+
from pipeline_steps.guardrail_step import GuardrailStep
|
| 84 |
+
from pipeline_steps.positioning_step import PositioningStep
|
| 85 |
+
from pipeline_steps.goal_trajectory_step import GoalTrajectoryStep
|
| 86 |
+
from pipeline_steps.intelligence_step import IntelligenceStep
|
| 87 |
+
from pipeline_steps.visualization_step import VisualizationStep
|
| 88 |
+
from pipeline_steps.comparison_step import ComparisonStep
|
| 89 |
+
from pipeline_steps.persist_analysis_step import PersistAnalysisStep
|
| 90 |
+
|
| 91 |
+
from core.intelligence.intelligence_builder import build_intelligence_snapshot
|
| 92 |
+
from application.dto.runner_api_response import RunnerAPIResponse
|
| 93 |
+
from core.intelligence.intelligence_serializer import serialize_snapshot
|
| 94 |
+
|
| 95 |
+
# Configure logging
|
| 96 |
+
logger = logging.getLogger(__name__)
|
| 97 |
+
|
| 98 |
+
class RunnerOrchestrator:
|
| 99 |
+
"""
|
| 100 |
+
Central orchestration component for Runner Intelligence.
|
| 101 |
+
|
| 102 |
+
Coordinates pipeline execution, agents, and services using the
|
| 103 |
+
RunnerPipeline architecture.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def __init__(self, feature_store=None, plan_store=None, viz_executor=None, db=None):
    """
    Wire up persistence, services, and agents for one orchestrator session.

    Args:
        feature_store: Feature store tool boundary; defaults to InMemoryFeatureStore.
        plan_store: Plan store tool boundary; defaults to InMemoryPlanStore.
        viz_executor: Visualization executor; defaults to TemplateVisualizationExecutor.
        db: Optional pre-built Database. When None, one is resolved from config:
            file-backed if persistence is enabled, otherwise an in-memory SQLite DB.
    """
    logger.info("Initializing RunnerOrchestrator")
    self.session_id = config.generate_session_id()

    # Tool boundaries
    self.feature_store = feature_store or InMemoryFeatureStore()
    self.plan_store = plan_store or InMemoryPlanStore()
    self.viz_executor = viz_executor or TemplateVisualizationExecutor()

    # New persistence layer
    # Repositories are declared up-front as None, then populated once the
    # Database instance is resolved below.
    self.db = db
    self.runner_repo = None
    self.run_repo = None
    self.weekly_repo = None
    self.trend_repo = None
    self.analysis_repo = None
    self.goal_repo = None
    self.planned_repo = None
    self.snapshot_service = None

    # Mode Discipline: Resolve storage implementation at Orchestrator boundary
    if not self.db:
        if config.is_persistence_enabled():
            self.db = Database(config.STORAGE_DB_PATH)
        else:
            # Fallback to volatile in-memory storage for HF Spaces/transient sessions
            self.db = Database(":memory:")

    # NOTE(review): schema is (re)initialized even when a Database was injected —
    # presumably initialize_schema is idempotent; confirm against Database impl.
    self.db.initialize_schema()

    # Inject dependencies
    self.runner_repo = RunnerRepository(self.db)
    self.run_repo = RunRepository(self.db)
    self.weekly_repo = WeeklySnapshotRepository(self.db)
    self.trend_repo = TrendSnapshotRepository(self.db)
    self.analysis_repo = AnalysisRepository(self.db)
    # Goal persistence is the only repo swapped for a Null implementation in
    # stateless mode.
    if config.is_persistence_enabled():
        self.goal_repo = SqlGoalRepository(self.db)
    else:
        self.goal_repo = NullGoalRepository()

    self.planned_repo = PlannedSessionRepository(self.db)
    self.intelligence_cache: Dict[str, bool] = {}
    self.runs_cache: List[Dict[str, Any]] = []

    # Services receive resolved dependencies
    self.structure_service = StructureService(
        planned_repo=self.planned_repo,
        goal_repo=self.goal_repo,
        runner_repo=self.runner_repo,
    )

    self.trend_engine = TrendEngine()
    self.run_persistence_service = RunPersistenceService(self.run_repo)

    # Stateless Builders
    self.weekly_snapshot_builder = WeeklySnapshotBuilder()
    self.weekly_trend_builder = WeeklyTrendBuilder()

    self.snapshot_service = SnapshotService(
        runner_repo=self.runner_repo,
        run_repo=self.run_repo,
        weekly_repo=self.weekly_repo,
        trend_repo=self.trend_repo,
        weekly_builder=self.weekly_snapshot_builder,
        trend_engine=self.trend_engine,
        structure_service=self.structure_service,
        brief_service=None,  # Will be set after brief_service is initialized
    )
    self.goal_service = GoalService(goal_repo=self.goal_repo)
    self.positioning_service = RunnerPositioningService()
    self.positioning_engine = PositioningEngine()
    self.recommendation_service = RecommendationService()

    # New isolation services
    self.guardrail_arbitration_service = GuardrailArbitrationService()
    self.positioning_change_service = PositioningChangeService()
    self.goal_progress_service = GoalProgressService()
    self.goal_trajectory_service = GoalTrajectoryService()

    # LLM Clients per Agent
    # Local factory: resolves a per-agent model profile from the registry and
    # builds an LLM client with global config parameters.
    def _get_agent_client(agent_name: str):
        profile = select_model_for_agent(agent_name)
        return get_llm_client(
            provider=profile.provider,
            model_name=profile.model_name,
            temperature=config.LLM_TEMPERATURE,
            max_tokens=config.LLM_MAX_TOKENS,
            drop_params=getattr(config, "LLM_DROP_PARAMS", False),
        )

    # Instantiate agents
    logger.info("Instantiating agents with specific model profiles")
    self.feature_agent = FeatureEngineeringAgent()
    self.insights_agent = InsightsAgent(llm_client=_get_agent_client("InsightsAgent"))
    self.plan_agent = PlanAgent(llm_client=_get_agent_client("PlanAgent"))
    self.visualization_agent = VisualizationAgent(
        llm_client=_get_agent_client("VisualizationAgent")
    )
    self.chat_agent = ChatAgent(llm_client=_get_agent_client("ChatAgent"))

    # We also need a client for the router
    self.router_client = _get_agent_client("Router")
    self.guardrail_agent = InjuryFatigueGuardrailAgent()

    # Instantiate dedicated BriefService
    # Late wiring: SnapshotService was constructed above with brief_service=None.
    self.brief_service = BriefService(llm_client=_get_agent_client("BriefService"))
    self.snapshot_service.brief_service = self.brief_service

    # Latest results state
    self.latest_insights = {}
    self.latest_plan = None
    self.latest_summary = {}
    self.latest_trends = {}
    self.latest_risk_assessment = None
    self.latest_goal_trajectory: Optional[GoalTrajectory] = None

    # Session snapshot cache for stateless mode history
    self.session_snapshots: Dict[date, WeeklySnapshot] = {}
    self.session_runner_positioning: Dict[date, RunnerPositioning] = {}
    self.session_view_positioning: Dict[date, WeeklyPositioning] = {}
    self.session_goal_trajectory: Dict[date, GoalTrajectory] = {}

    # State-driven intelligence cache (v1 optimization)
    self._last_positioning: Optional[RunnerPositioning] = None
    self._last_insights: Dict[str, Any] = {}
    self._last_plan: Optional[str] = None
    self._last_brief: str = ""
    self._last_focus: str = ""
    self._last_runner_positioning: Optional[RunnerPositioning] = None

    # State-driven intelligence cache (v2 optimization)
    # Keyed by (week_start, language) to prevent stale content on language switch
    self.intelligence_snapshots: Dict[Tuple[date, str], RunnerIntelligenceSnapshot] = {}
    if self.snapshot_service and hasattr(self.snapshot_service, "intelligence_snapshots"):
        # Ensure we are not accidentally using a Mock from a test environment
        actual_cache = self.snapshot_service.intelligence_snapshots
        if isinstance(actual_cache, dict):
            # We expect the cache to be empty or already Tuple-keyed if from a previous run in the same session.
            # If it has date-only keys (from a previous version), they will be ignored/overwritten.
            self.intelligence_snapshots = actual_cache

    # DTO API Response
    # Empty/default response skeleton; populated by pipeline runs.
    self.dto_response = RunnerAPIResponse(
        num_runs=0,
        weeks=0,
        features=[],
        insights={},
        plan=None,
        risk_assessment=None,
        trends={},
        period_comparison=None,
        charts={},
        profile=None,
        goal_progress=None,
        goal_trajectory=None,
        positioning=None,
        recommendation=None,
        intelligence_snapshot=None,
        active_goal=None,
        mode="RunnerOrchestrator"
    )

    logger.info("RunnerOrchestrator initialized")
|
| 270 |
+
|
| 271 |
+
async def _initialize(self):
    """Asynchronous initialization (RunnerOrchestrator session initialization removed).

    Kept as an async no-op so callers awaiting it keep working.
    """
    # Removed the redundant trailing `pass` — the method body already has
    # statements, so `pass` was dead code.
    logger.info("Orchestrator skipping RunnerOrchestrator session initialization")
|
| 275 |
+
|
| 276 |
+
def _select_active_weeks(self, all_affected_weeks: List[date]) -> Set[date]:
    """
    Choose the weeks that should receive full (LLM) intelligence generation.

    Standard policy: the current (latest) week plus the week immediately
    before it, when one exists.

    Args:
        all_affected_weeks: Week-start dates touched by the current ingest.

    Returns:
        Set of week-start dates selected for full intelligence.
    """
    if not all_affected_weeks:
        return set()

    ordered = sorted(all_affected_weeks)
    # The last two entries are the current week and (if present) the one
    # before it; a single-element list yields just the current week.
    return set(ordered[-2:])
|
| 292 |
+
|
| 293 |
+
def reset(self):
    """Resets the orchestrator state by clearing stores and results.

    Fixes over the previous version (final state is identical):
    - The docstring was placed *after* a local ``import config`` statement,
      making it a no-op string expression instead of a docstring.
    - The local ``import config`` was redundant — config is already imported
      at module level.
    - Duplicate assignments removed (latest_goal_trajectory, latest_plan,
      latest_positioning etc. were each set multiple times).
    """
    logger.info("Resetting orchestrator state")

    # Drop persistence handles (repositories created in __init__ keep their
    # own DB reference, so the purge below still works).
    self.db = None
    self.weekly_repo.delete_all_for_runner(config.DEFAULT_RUNNER_ID)
    self.analysis_repo = None

    # Clear tool-boundary stores.
    self.feature_store.clear()
    self.plan_store.clear()

    # Session-scoped caches.
    self.session_snapshots = {}
    self.runner_profile = None
    self.intelligence_snapshots = {}
    self.session_runner_positioning = {}
    self.session_view_positioning = {}
    self.session_goal_trajectory = {}

    # v1 intelligence cache.
    self._last_positioning = None
    self._last_insights = {}
    self._last_plan = None
    self._last_brief = ""
    self._last_focus = ""
    self._last_runner_positioning = None

    # Latest results state.
    self.latest_goal_trajectory = None
    self.latest_positioning = None
    self.latest_plan = None
    self.latest_brief = ""
    self.latest_focus = ""
    self.latest_insights = {}
    self.latest_summary = {}
    self.latest_risk_assessment = None
|
| 326 |
+
|
| 327 |
+
async def run(
    self,
    runs: List[Dict[str, Any]],
    language: str = "en",
    target_monday: Optional[date] = None,
    weeks: Optional[List[date]] = None,
    mode: str = "default"
):
    """
    Main orchestration entry point.

    Executes the analysis pipeline over the uploaded runs, then generates
    (or reuses) per-week intelligence snapshots and assembles the API DTO.

    Args:
        runs: Parsed run dicts uploaded in this request.
        language: Output language for generated intelligence.
        target_monday: Optional explicit week anchor for the pipeline.
        weeks: Optional subset of week-start dates to (re)compute (lazy mode).
        mode: "default" or "on_demand" (on_demand forces intelligence for all
            requested weeks).

    Returns:
        RunnerAPIResponse DTO; also cached on ``self.dto_response``.

    Raises:
        Re-raises any pipeline failure after emitting an error event.
    """
    obs_logger.bind_new_trace_id()
    with obs_logger.start_span("orchestrator.run", obs_components.ORCHESTRATOR):
        start_time = time.time()
        logger.info(f"Starting orchestration run with trace_id: {obs_logger.get_trace_id()}")
        # Fixed: debug output now goes through the logger instead of print/stdout.
        logger.debug(f"Orchestrator.run called with {len(runs)} runs")
        self.runs_cache = runs

        # 1. Initialize Pipeline Context
        context = PipelineContext(
            runs=runs,
            language=language,
            target_monday=target_monday,
        )
        context.sessions = self.session_snapshots  # Link to session cache

        # Restore state for intelligence reuse
        context.last_positioning = self._last_positioning
        context.last_insights = self._last_insights
        context.last_plan = self._last_plan
        context.last_brief = self._last_brief
        context.last_focus = self._last_focus
        context.last_runner_positioning = self._last_runner_positioning

        # Load or create profile
        from datetime import datetime
        if self.runner_repo:
            profile = self.runner_repo.get_runner_profile(uuid.UUID(config.DEFAULT_RUNNER_ID))
            if not profile:
                logger.info("No profile found, creating default")
                profile = RunnerProfile(
                    runner_id=uuid.UUID(config.DEFAULT_RUNNER_ID),
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc),
                )
                self.runner_repo.save(profile)
            context.runner_profile = profile
            self.runner_profile = profile

        if self.goal_repo:
            context.active_goal = self.goal_repo.get_active_goal(uuid.UUID(config.DEFAULT_RUNNER_ID))

        # 2. Build Pipeline
        pipeline = RunnerPipeline([
            FeatureEngineeringStep(self.feature_agent),
            PersistRunsStep(self.run_persistence_service),
            StructureStep(self.structure_service),
            SnapshotStep(self.snapshot_service, self.weekly_trend_builder),
            GuardrailStep(self.guardrail_agent),
            ComparisonStep(self.weekly_repo),
            PersistAnalysisStep(self.analysis_repo)
        ])

        # 3. Execute Pipeline
        try:
            await pipeline.execute(context)

            # --- MULTI-WEEK INTELLIGENCE GENERATION ---
            # SnapshotStep populated context.sessions. We ensure each has a full intelligence snapshot.
            affected_weeks = context.sessions.keys()
            runner_id = str(uuid.UUID(config.DEFAULT_RUNNER_ID))
            self.snapshot_service.invalidate_weeks(runner_id, list(affected_weeks))

            all_affected_weeks = sorted(affected_weeks)

            # Filter by requested weeks if provided (Lazy Intelligence)
            if weeks:
                all_affected_weeks = [w for w in all_affected_weeks if w in weeks]

            active_weeks = self._select_active_weeks(all_affected_weeks)

            # Capture global state before the loop splits the context per week
            risk_assessment = context.risk_assessment
            active_goal = context.active_goal
            language = context.language

            # Preserve primary target (usually latest) for final context state
            primary_week = getattr(context.weekly_snapshot, "week_start_date", None)

            def _restore_week_fields(ctx, snap):
                # Copy cached snapshot fields back onto a per-week context so the
                # API response stays consistent without recomputation.
                # (Previously duplicated verbatim in two branches below.)
                ctx.insights = snap.insights
                ctx.plan = snap.insights.get("plan") if isinstance(snap.insights, dict) else getattr(snap.insights, "plan", None)
                ctx.positioning = snap.positioning
                ctx.positioning_view = snap.positioning_view
                ctx.recommendation = snap.recommendation
                ctx.trends = snap.trend
                ctx.goal_trajectory = snap.goal_trajectory_data
                ctx.charts = snap.charts

            for w_start in all_affected_weeks:
                # In on_demand mode (Lazy), we force intelligence for all requested weeks
                if mode == "on_demand":
                    enable_intelligence = True
                    logger.info(f"[Lazy Intelligence] Triggered for week {w_start}")
                else:
                    enable_intelligence = w_start in active_weeks

                # Fetch ALL runs for this week from the repository to ensure full
                # scope for viz and intel (not just the newly uploaded runs).
                all_week_runs = []
                if self.run_repo:
                    runner_id_str = str(uuid.UUID(config.DEFAULT_RUNNER_ID))
                    all_week_runs = self.run_repo.get_runs_for_week(runner_id_str, w_start)

                # Fallback for tests/in-memory: if repo is empty, use uploaded runs from context
                if not all_week_runs and context.runs:
                    all_week_runs = [r for r in context.runs if week_start(r.get("start_time") if isinstance(r, dict) else r.start_time).date() == w_start]
                    logger.info(f"[Orchestrator] Week {w_start} repository empty, fallback to context.runs (found {len(all_week_runs)})")

                # Ensure chronological order for IntelligenceStep.runs[-1]
                def run_start_time(r):
                    if isinstance(r, dict):
                        return r.get('start_time') or datetime.min.replace(tzinfo=timezone.utc)
                    return getattr(r, 'start_time', datetime.min.replace(tzinfo=timezone.utc))

                all_week_runs = sorted(all_week_runs, key=run_start_time)

                # Create a fresh context for each week to avoid leak but preserve sessions
                week_context = PipelineContext(
                    runner_profile=self.runner_profile,
                    sessions=context.sessions,
                    runs=all_week_runs,  # Use all runs for the week (needed for IntelligenceStep)
                    risk_assessment=risk_assessment,
                    active_goal=active_goal,
                    language=language,
                    enable_intelligence=enable_intelligence,
                    visualization_scope="weekly",
                    # Pass cache to per-week context
                    last_runner_positioning=self._last_runner_positioning,
                    last_positioning=self._last_positioning,
                    last_insights=self._last_insights,
                    last_plan=self._last_plan,
                    last_brief=self._last_brief,
                    last_focus=self._last_focus,
                    intelligence_cache=self.intelligence_cache
                )
                # Keep week-specific runs separately for weekly metrics if needed
                week_context.week_runs = all_week_runs  # Use all runs for VisualizationStep
                logger.info(f"[Orchestrator] Week {w_start} → total week_runs: {len(all_week_runs)}")

                # 1. Setup focus for this specific week
                week_context.weekly_snapshot = context.sessions[w_start]

                # Compute WeeklyTrend (UI/Agent model) with historical context
                all_history = self.snapshot_service.get_history(12)
                hist_before = [h for h in all_history if h.week_start_date < w_start]
                week_context.weekly_trend = self.weekly_trend_builder.build(week_context.weekly_snapshot, hist_before)

                # 2. Refresh Agent context (summary/trends) for this specific week
                summary, trends = self.snapshot_service.build_agent_context_for_week(w_start)
                week_context.summary = summary
                week_context.trends = trends

                # --- OPTIMIZATION: DIRTY CHECK ---
                is_dirty = self.snapshot_service.is_week_dirty(w_start, summary)
                if mode != "on_demand" and not is_dirty:
                    logger.info(f"[Orchestrator] Multi-week: Week {w_start} is CLEAN. Reusing existing snapshot.")
                    cached_snap = self.snapshot_service.get_snapshot(w_start, language=language)
                    week_context.intelligence_snapshot = cached_snap

                    # Restore context fields from snapshot for API response consistency
                    if cached_snap:
                        _restore_week_fields(week_context, cached_snap)
                        # Store in global intelligence_snapshots for later resolution
                        self.intelligence_snapshots[(w_start, language)] = cached_snap

                    continue

                # --- LAZY INTELLIGENCE: SKIP IF ALREADY AVAILABLE ---
                existing_snapshot = self.snapshot_service.get_snapshot(w_start, language=language)
                if mode != "on_demand" and self.snapshot_service.has_intelligence(existing_snapshot) and not is_dirty:
                    logger.info(f"[Lazy Intelligence] Skipped (already available) for week {w_start}")
                    week_context.intelligence_snapshot = existing_snapshot
                    # Restore context fields (brief already in snapshot)
                    if existing_snapshot:
                        _restore_week_fields(week_context, existing_snapshot)

                    self.intelligence_snapshots[(w_start, language)] = existing_snapshot

                    continue

                logger.info(f"[Orchestrator] Multi-week: Week {w_start} is DIRTY or ON-DEMAND. Recomputing intelligence.")

                await self._run_intelligence_for_context(week_context, enable_intelligence=enable_intelligence)

                # 4. Produce and Store Intelligence Snapshot
                snapshot = build_intelligence_snapshot(week_context)
                self.snapshot_service.store_snapshot(w_start, snapshot, language=language)
                self.intelligence_snapshots[(w_start, language)] = snapshot  # Must update cache for results

                if runs:  # Only persist if there are new runs uploaded otherwise charts exists in the snapshot
                    self.snapshot_service.persist_intelligence_snapshot(snapshot)
                    logger.info(f"[DEBUG] Persist charts → snapshot_id={snapshot.id}")
                logger.debug(f"[Orchestrator] Multi-week: Generated and stored intelligence snapshot for {w_start} ({language})")

            # Restore primary focus for the API response
            if primary_week and primary_week in context.sessions:
                context.weekly_snapshot = context.sessions[primary_week]
                context.weekly_trend = self.trend_repo.get_by_week(primary_week) if self.trend_repo else None
                summary, trends = self.snapshot_service.build_agent_context_for_week(primary_week)
                context.summary = summary
                context.trends = trends
                snap = self.intelligence_snapshots.get((primary_week, language))
                context.intelligence_snapshot = snap
                if snap:
                    context.insights = snap.insights
                    context.plan = snap.plan
                    context.recommendation = snap.recommendation
                    context.positioning = snap.positioning
                    context.goal_trajectory = snap.goal_trajectory_data
                    context.charts = snap.charts  # Restore the primary week's snapshot charts
                    context.weekly_brief = snap.weekly_brief
                    context.weekly_focus = snap.weekly_focus
                    context.goal_view = snap.goal_view

            # --- GENERATE FULL SCOPE CHARTS ---
            # These are aggregated across all uploaded runs and assigned to the top-level context.
            if runs and len(all_affected_weeks) > 1:
                logger.info("[Orchestrator] Generating FULL SCOPE charts for top-level results")
                context.visualization_scope = "full"
                viz_step = VisualizationStep(self.visualization_agent, self.viz_executor)
                await viz_step.run(context)

            # Calculate num_runs and the week span for the response metadata.
            # Fixed: renamed the counter from `weeks` to `weeks_span` so it no
            # longer shadows the `weeks` parameter.
            num_runs = len(runs)
            weeks_span = 0
            if runs:
                dates = []
                for r in runs:
                    d = r.get("start_time")
                    if isinstance(d, datetime):
                        dates.append(d)
                    elif isinstance(d, str):
                        try:
                            dates.append(datetime.fromisoformat(d.replace("Z", "+00:00")))
                        except (ValueError, TypeError):  # Fixed: was a bare except
                            pass
                if dates:
                    delta = max(dates) - min(dates)
                    weeks_span = max(1, (delta.days // 7) + 1)

            # Update legacy stores for backward compatibility
            detailed_features = [
                f.model_dump(mode="json") if hasattr(f, "model_dump") else f for f in context.runs
            ]
            for feat in detailed_features:
                run_id = feat.get("id") or str(uuid.uuid4())
                self.feature_store.put_features(run_id, feat)

            if context.summary:
                self.feature_store.put_weekly_summary("latest", context.summary)

            if context.plan:
                self.plan_store.save_plan("latest", context.plan)

            # Store latest in orchestrator for chat consistency
            self.latest_insights = context.insights
            self.latest_plan = context.plan
            self.latest_summary = context.summary
            self.latest_risk_assessment = context.risk_assessment

            # Update orchestrator cache from context
            self._last_runner_positioning = context.last_runner_positioning
            self._last_positioning = context.last_positioning
            self._last_insights = context.last_insights
            self._last_plan = context.last_plan
            self._last_brief = context.last_brief
            self._last_focus = context.last_focus
            self.latest_goal_trajectory = getattr(context, "goal_trajectory", None)
            if self.latest_goal_trajectory and context.weekly_snapshot:
                self.session_goal_trajectory[context.weekly_snapshot.week_start_date] = self.latest_goal_trajectory

            logger.info("Orchestration run completed via Pipeline architecture")

            # Helper for safe DTO dumping (handles both objects and dicts from cache)
            def _safe_dump(obj):
                if obj is None:
                    return None
                if isinstance(obj, dict):
                    return obj
                if hasattr(obj, "model_dump"):
                    return obj.model_dump()
                return obj

            # Construct DTO response; final pass ensures everything is JSON serializable
            try:
                from tools.helpers import decode_chart
                self.dto_response = RunnerAPIResponse(
                    num_runs=num_runs,
                    weeks=weeks_span,
                    features=detailed_features,
                    insights=context.insights,
                    plan=context.plan,
                    risk_assessment=_safe_dump(context.risk_assessment),
                    trends=context.trends,  # Week specific trend
                    period_comparison=context.period_comparison,
                    charts=decode_chart(context.charts),
                    profile=_safe_dump(context.runner_profile),
                    goal_progress=_safe_dump(getattr(context.intelligence_snapshot, 'goal_view', {})),
                    goal_trajectory=_safe_dump(context.goal_trajectory),
                    positioning=_safe_dump(context.positioning),
                    recommendation=_safe_dump(context.recommendation),
                    intelligence_snapshot=context.intelligence_snapshot_serialized if hasattr(context, 'intelligence_snapshot_serialized') else serialize_snapshot(context.intelligence_snapshot),
                    active_goal=_safe_dump(context.active_goal),
                )
            except Exception as ser_err:
                logger.error(f"Serialization failed in Orchestrator DTO layer: {ser_err}")
                # Fallback to minimal serializable DTO
                self.dto_response = RunnerAPIResponse(
                    num_runs=num_runs,
                    weeks=weeks_span,
                    features=[],
                    insights={"error": "Serialization failed"},
                    plan=None,
                    risk_assessment=None,
                    trends=None,
                    period_comparison=None,
                    charts={},
                    profile=None,
                    goal_progress=None,
                    goal_trajectory=None,
                    positioning=None,
                    recommendation=None,
                    intelligence_snapshot=None
                )

            return self.dto_response
        except Exception as e:
            obs_logger.log_event(
                "error",
                f"Orchestration pipeline failed: {e}",
                event="error",
                component=obs_components.ORCHESTRATOR,
                error_type=type(e).__name__,
                error_message=str(e),
                duration_ms=(time.time() - start_time) * 1000,
            )
            raise
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
def _persist_analysis(
    self,
    features: List[Dict],
    insights: Any,
    plan: str,
    risk_assessment: RiskAssessment,
    trends: WeeklyTrends,
):
    """Persist analysis artefacts (summary, features, insights, plan) best-effort.

    No-op when no repository is configured; failures are logged as warnings
    and never propagated to the caller.
    """
    if not self.analysis_repo:
        return

    try:
        # Fold the risk assessment into the weekly trend summary.
        run_summary = trends.model_dump()
        if risk_assessment:
            run_summary["risk_assessment"] = risk_assessment.model_dump()

        # Normalize insights to a plain dict regardless of input shape.
        if isinstance(insights, dict):
            insights_payload = insights
        elif hasattr(insights, "model_dump"):
            insights_payload = insights.model_dump()
        else:
            insights_payload = {"content": str(insights)}

        record = AnalysisRecord(
            source_files=[],
            formats=[],
            run_summary=run_summary,
            run_timeseries=features,
            insights_json=insights_payload,
            plan_json={"content": plan},
            route_json={},
        )
        self.analysis_repo.save(record)
        logger.info(f"Analysis persisted with ID: {record.id}")
    except Exception as e:
        logger.warning(f"Failed to persist analysis: {e}")
|
| 723 |
+
|
| 724 |
+
async def chat(self, message: str, language: str = "en") -> Dict[str, Any]:
    """
    Handles a chat message from the user.
    Returns a dictionary with 'response' (text) and optionally 'chart' (figure).

    Routes the message (CHART vs. conversational), filters features by the
    requested period, and delegates either to the visualization agent or to
    the chat agent. Any unexpected failure is reported back as a chat response.
    """
    obs_logger.bind_new_trace_id()
    with obs_logger.start_span("orchestrator.chat", obs_components.ORCHESTRATOR):
        start_time = time.time()

        logger.info(f"Starting chat handling with trace_id: {obs_logger.get_trace_id()}")

        try:
            session_features = self.feature_store.get_all_features()

            # Load all historic features to provide a complete picture
            historic_features = []
            if self.run_repo:
                try:
                    # Note: RunRepository needs mapping from analyses for now
                    # but get_all_features usually returns all from all analyses
                    historic_features = (
                        self.analysis_repo.get_all_features()
                        if hasattr(self.analysis_repo, "get_all_features")
                        else []
                    )
                except Exception as e:
                    logger.warning(f"Failed to load historic features: {e}")

            # Merge and deduplicate (session features win over historic ones)
            all_features_map = {f.get("id") or f.get("start_time"): f for f in historic_features}
            for f in session_features:
                key = f.get("id") or f.get("start_time")
                all_features_map[key] = f

            all_features = list(all_features_map.values())

            # Issue #30: Auto-inject last 4 insights if no new upload in this session
            auto_injected_insights = []
            is_no_upload = not session_features

            if is_no_upload and self.analysis_repo:
                try:
                    # Fetch exactly 4 per requirement
                    auto_injected_insights = self.analysis_repo.get_recent_insights(limit=4)
                    logger.info(
                        f"Auto-injected {len(auto_injected_insights)} recent insights (no upload detected)"
                    )
                except Exception as e:
                    logger.warning(f"Failed to load auto-injected insights: {e}")

            # Global "No Data" check
            if not all_features and not auto_injected_insights:
                return {
                    "response": "I don't have any running data from you yet. Please upload your .tcx / .fit files so I can help you!"
                }

            # Sort features by date descending (most recent first)
            def get_date(f):
                dt = f.get("start_time")
                if isinstance(dt, str):
                    try:
                        return datetime.fromisoformat(dt.replace("Z", "+00:00")).date()
                    except (ValueError, TypeError):  # Fixed: was a bare except
                        return date.min
                elif isinstance(dt, datetime):
                    return dt.date()
                elif isinstance(dt, date):
                    return dt
                return date.min

            all_features = sorted(all_features, key=get_date, reverse=True)

            # 1. Route the request using the new structured router
            decision = await async_route(
                message, raw_context={"features": all_features}, llm_client=self.router_client
            )
            intent = decision.route
            logger.info(f"[ORCH] Router: route={intent}, metric={decision.metric}, period={decision.period}, target_date={decision.target_date}")

            if intent == "CHART":
                # 1. Handle explicit 'this month' or similar periods
                if decision.period and "month" in decision.period.lower():
                    month_start = date.today().replace(day=1)
                    all_features = [f for f in all_features if get_date(f) >= month_start]
                    logger.info(f"[ORCH] Filtered to {len(all_features)} runs for current month (start: {month_start})")

                # 2. Handle default 30-day window for null/unrecognized periods
                elif not decision.period or decision.period in [None, "", "null", "none", "last_30_days"]:
                    if not decision.target_date:  # target_date overrides the 30-day default
                        one_month_ago = date.today() - timedelta(days=30)
                        all_features = [f for f in all_features if get_date(f) >= one_month_ago]
                        logger.info(f"[ORCH] No specific period. Defaulted to 30-day filter (count: {len(all_features)})")

                # 3. 'all_time' remains unfiltered
                elif decision.period == "all_time":
                    logger.info(f"[ORCH] 'all_time' requested. Using all {len(all_features)} runs.")

            if intent == "CHART":
                # Smart retrieval: Check if it's a specific week matched by the router
                if decision.target_date:
                    snapshot = self.get_intelligence_snapshot(decision.target_date, language=language)
                    if snapshot and snapshot.charts:
                        # Normalize metric name for snapshot lookup
                        metric_map = {
                            "heart_rate": "hr",
                            "heartrate": "hr",
                            "hr": "hr",
                            "pace": "pace",
                            "distance": "volume",
                            "volume": "volume",
                            "frequency": "frequency",
                        }
                        norm_metric = metric_map.get(decision.metric.lower(), "pace") if decision.metric else "pace"
                        target_key = f"{norm_metric}_chart"
                        fig = snapshot.charts.get(target_key) or snapshot.charts.get("pace_chart")

                        if fig:
                            obs_logger.log_event("info", "Snapshot chart cache hit", event="snapshot_cache_hit", week=str(decision.target_date))
                            return {
                                "response": f"Here is the {decision.metric or 'pace'} chart from the week of {decision.target_date}.",
                                "chart": fig,
                            }

                # Fallback to Visualization Agent for multi-week or custom charts
                chart_specs = await self.visualization_agent.run(all_features, query=message, intent=decision)

                if chart_specs:
                    # Render the best match for the chat interface
                    spec = chart_specs[0]
                    fig = self.viz_executor.render_chart(spec, all_features)
                    return {
                        "response": f"I've generated a {spec.chart_type} chart based on your request.",
                        "chart": fig,
                    }
                else:
                    return {
                        "response": "I couldn't find a way to visualize those metrics with your current data. Could you try asking for a summary instead?"
                    }

            else:
                # Delegate to Chat Agent
                summary = self.latest_summary
                # Fixed: `latest_trends` is not set by reset(), so a plain
                # attribute read could raise AttributeError here.
                trends = getattr(self, "latest_trends", None)

                # If session state is raw dict (e.g. from previous run before refactor),
                # convert to model or fallback to building from snapshots
                if isinstance(summary, dict) or isinstance(trends, dict) or not summary:
                    summary, trends = self.snapshot_service.build_agent_context()

                latest_plan = self.latest_plan or self.plan_store.get_latest_plan()
                current_insights = self.latest_insights

                # Maintain the 10-limit historic context for general memory if needed,
                # but we'll use auto_injected_insights specifically for the prompt injection.
                historic_insights = []
                if self.analysis_repo:
                    try:
                        historic_insights = self.analysis_repo.get_recent_insights(limit=10)
                    except Exception as e:
                        logger.warning(f"Failed to load historic insights: {e}")

                response_text = await self.chat_agent.run(
                    message,
                    context={
                        "features": all_features,
                        "insights": current_insights,
                        "plan": latest_plan,
                        "summary": summary,
                        "historic_insights": historic_insights,
                        "auto_injected_insights": auto_injected_insights,
                        "is_no_upload": is_no_upload,
                    },
                    language=language,
                )
                return {"response": response_text}

        except Exception as e:
            # Fixed: log the failure instead of silently swallowing it before
            # converting it into a chat response.
            logger.exception(f"Chat handling failed: {e}")
            return {"response": f"I encountered an error: {str(e)}"}
|
| 902 |
+
|
| 903 |
+
def load_last_analysis(self) -> Optional[AnalysisRecord]:
    """Debug helper to load the last persisted analysis."""
    repo = self.analysis_repo
    return repo.get_last_analysis() if repo else None
|
| 908 |
+
|
| 909 |
+
async def generate_performance_card(
    self, snapshot: Any, trend: Any, language: str = "en"
) -> Any:
    """
    Centralized entry point for generating performance cards.
    Enforces architecture boundary by keeping BriefService internal.

    Accepts either model instances or raw dicts (validated via Pydantic).
    Fixed: the docstring previously appeared after executable statements,
    so it never became the function's ``__doc__``.
    """
    # Pydantic support for dict inputs from UI
    if isinstance(snapshot, dict):
        snapshot = WeeklySnapshot.model_validate(snapshot)
    if isinstance(trend, dict):
        trend = WeeklyTrend.model_validate(trend)

    obs_logger.ensure_trace()
    with obs_logger.start_span("orchestrator.performance_card", component=obs_components.ORCHESTRATOR):
        from services.performance_card_service import PerformanceCardService

        # Boundary Enforcement: Orchestrator owns the nested span and event emission
        with obs_logger.start_span("performance_card.generate", component=obs_components.APPLICATION):
            service = PerformanceCardService()
            card = await service.generate(
                snapshot=snapshot,
                trend=trend,
                brief_service=self.brief_service,
                language=language,
            )

            obs_logger.log_event(
                "info",
                "Performance card generated successfully",
                event="performance_card_generated",
                component=obs_components.ORCHESTRATOR,
                **{
                    "week_start": str(snapshot.week_start_date),
                    "comparison_available": trend.comparison_available,
                    "comparison_type": trend.comparison_type,
                    "llm_used": card.llm_used,  # Use actual execution status
                },
            )
            return card
|
| 949 |
+
|
| 950 |
+
async def generate_runner_positioning(
|
| 951 |
+
self, snapshot: Any, trend: Any, language: str = "en"
|
| 952 |
+
) -> Any:
|
| 953 |
+
# Pydantic support for dict inputs from UI
|
| 954 |
+
if isinstance(snapshot, dict):
|
| 955 |
+
snapshot = WeeklySnapshot.model_validate(snapshot)
|
| 956 |
+
if isinstance(trend, dict):
|
| 957 |
+
trend = WeeklyTrend.model_validate(trend)
|
| 958 |
+
|
| 959 |
+
"""
|
| 960 |
+
Generates runner positioning assessment (v1).
|
| 961 |
+
Uses session memory to avoid recomputation if possible.
|
| 962 |
+
"""
|
| 963 |
+
obs_logger.ensure_trace()
|
| 964 |
+
with obs_logger.start_span("positioning.generate", component=obs_components.APPLICATION):
|
| 965 |
+
# 1. Check View Cache
|
| 966 |
+
if snapshot.week_start_date in self.session_view_positioning:
|
| 967 |
+
return self.session_view_positioning[snapshot.week_start_date]
|
| 968 |
+
|
| 969 |
+
# 2. Recompute if missing (e.g. initial load without new analysis)
|
| 970 |
+
positioning = self.positioning_engine.compute(snapshot, trend)
|
| 971 |
+
self.session_view_positioning[snapshot.week_start_date] = positioning
|
| 972 |
+
|
| 973 |
+
# 3. Emit Event (STEP 10)
|
| 974 |
+
obs_logger.log_event(
|
| 975 |
+
"info",
|
| 976 |
+
"Runner positioning computed",
|
| 977 |
+
event="positioning_computed",
|
| 978 |
+
component=obs_components.APPLICATION,
|
| 979 |
+
week=str(snapshot.week_start_date),
|
| 980 |
+
status=positioning.status, # WeeklyPositioning has 'status'
|
| 981 |
+
signal_strength=str(positioning.signal_strength)
|
| 982 |
+
)
|
| 983 |
+
|
| 984 |
+
return positioning
|
| 985 |
+
|
| 986 |
+
    def get_intelligence_snapshot(self, week_start: Optional[date] = None, language: str = "en") -> Optional[RunnerIntelligenceSnapshot]:
        """
        Retrieves the intelligence snapshot for a specific week and language.
        If week_start is not provided or not found, falls back to the most recent snapshot for THAT language.

        NOTE: the "latest for this language" fallback (step 4) is currently
        disabled — it logs the candidate week but deliberately returns None.
        """
        # Store is keyed by (week_start, language) tuples; legacy entries may
        # still use bare date keys, hence the defensive filtering below.
        if not self.intelligence_snapshots:
            logger.debug("[Orchestrator] No snapshots available in store.")
            return None

        # 1. Exact match (Week + Language)
        if week_start and (week_start, language) in self.intelligence_snapshots:
            logger.debug(f"[Orchestrator] Found exact snapshot match for {week_start} ({language})")
            return self.intelligence_snapshots[(week_start, language)]

        # 2. Defensive check: filter for valid tuple keys to avoid TypeError on legacy date-only keys
        tuple_keys = [k for k in self.intelligence_snapshots.keys() if isinstance(k, tuple) and len(k) == 2]

        # 3. If week_start exists but in different language, we return None
        # to force on-demand recomputation for the new language.
        week_exists_other_lang = any(k[0] == week_start for k in tuple_keys)
        if week_start and week_exists_other_lang:
            logger.debug(f"[Orchestrator] Week {week_start} found but not in {language}. Returning None to trigger recompute.")
            return None

        # 4. Fallback to latest available for THIS language
        lang_snapshots = {k[0]: v for k, v in self.intelligence_snapshots.items()
                          if isinstance(k, tuple) and len(k) == 2 and k[1] == language}
        if lang_snapshots:
            sorted_weeks = sorted(lang_snapshots.keys(), reverse=True)
            latest_week = sorted_weeks[0]
            # Fallback intentionally disabled: log the candidate, but return None
            # so callers recompute rather than reuse a possibly stale week.
            logger.debug(f"[Orchestrator] Fallback to latest {language} snapshot [DISABLED]: {latest_week}")
            return None #lang_snapshots[latest_week]

        # 5. Global fallback if nothing found for this language
        logger.debug(f"[Orchestrator] No snapshot found for {language}. Returning None.")
        return None
|
| 1022 |
+
|
| 1023 |
+
def _is_snapshot_complete(self, snapshot) -> bool:
|
| 1024 |
+
return all([
|
| 1025 |
+
snapshot.weekly_snapshot is not None,
|
| 1026 |
+
snapshot.weekly_trend is not None,
|
| 1027 |
+
snapshot.positioning_view is not None,
|
| 1028 |
+
snapshot.plan is not None,
|
| 1029 |
+
snapshot.recommendation is not None,
|
| 1030 |
+
snapshot.evidence_view is not None,
|
| 1031 |
+
#snapshot.charts is not None, #TODO: re-enable when visualization is fixed
|
| 1032 |
+
])
|
| 1033 |
+
|
| 1034 |
+
    async def get_or_build_intelligence_snapshot(
        self,
        week_start: date,
        language: str = "en"
    ) -> Optional[RunnerIntelligenceSnapshot]:
        """
        Lazy-load intelligence snapshot using cache-aside pattern.

        - Returns cached snapshot if available
        - Otherwise builds it on-demand for the requested week

        Returns None when no weekly data exists for the week, or when no
        agent-context summary can be built.
        """

        # 1. Fast Path: Cache-Aside Lookup
        # We only reuse if it actually HAS intelligence (not a shell snapshot)
        existing = self.get_intelligence_snapshot(week_start, language=language)
        if existing:
            if not self._is_snapshot_complete(existing):
                # Incomplete (shell) snapshot: fall through and rebuild below.
                logger.debug(f"[Snapshot] Rebuilding week: {week_start} (incomplete snapshot)")
            else:
                obs_logger.log_event(
                    "info",
                    "Intelligence snapshot cache hit (active)",
                    event="intelligence_cache_hit",
                    week=str(week_start),
                    language=language
                )
                logger.debug(f"[Snapshot] Using cached snapshot: {week_start}")
                return existing
        else:
            logger.debug(f"[Snapshot] Rebuilding week: {week_start} (not found)")

        # 2. Guardrail → no data, no intelligence
        weekly = self.weekly_repo.get_by_week(week_start) if self.weekly_repo else None
        if not weekly:
            return None

        # 3. Build agent context for this specific week
        # We need to ensure we have the runner_id correctly
        runner_id = weekly.runner_id if hasattr(weekly, "runner_id") else uuid.UUID(config.DEFAULT_RUNNER_ID)

        # 3.1 Fetch runs for this week
        week_runs = []
        if self.run_repo:
            week_runs = self.run_repo.get_runs_for_week(str(runner_id), week_start)

        summary, trends = self.snapshot_service.build_agent_context_for_week(target_date=week_start)

        if not summary:
            return None

        # Create context specifically for this lazy orchestration
        # 3. Prepare context for lazy built
        active_goal = self.goal_service.get_active_goal(self.runner_profile.runner_id)

        # We reuse global state for consistency
        context = PipelineContext(
            runner_profile=self.runner_profile,
            runs=week_runs,
            sessions=self.session_snapshots,  # Fallback to in-memory snapshots if persistent repo fails
            language=language,
            enable_intelligence=True,
            target_monday=week_start,
            summary=summary,
            trends=trends,
            active_goal=active_goal,
            last_runner_positioning=self._last_runner_positioning,
            last_positioning=self._last_positioning,
            last_insights={},
            last_plan=None,
            last_focus=self._last_focus,
            visualization_scope="weekly",
            intelligence_cache={}
        )
        context.weekly_snapshot = weekly

        # Build WeeklyTrend for this week
        # Only weeks strictly before the target contribute to the trend baseline.
        all_history = self.snapshot_service.get_history(12)
        hist_before = [h for h in all_history if h.week_start_date < week_start]
        context.weekly_trend = self.weekly_trend_builder.build(weekly, hist_before)

        obs_logger.log_event(
            "info",
            f"Lazy intelligence generation triggered for {week_start}",
            component=obs_components.APPLICATION,
            week=str(week_start),
            cached=False
        )

        # 4. Run Intelligence-related steps
        await self._run_intelligence_for_context(context, enable_intelligence=True)

        # 5. Build snapshot
        snapshot = build_intelligence_snapshot(context)

        # 6. Store in cache and persistence
        self.snapshot_service.store_snapshot(week_start, snapshot, language=language)
        self.intelligence_snapshots[(week_start, language)] = snapshot

        return snapshot
|
| 1133 |
+
|
| 1134 |
+
async def _run_intelligence_for_context(self, context, enable_intelligence: bool = True):
|
| 1135 |
+
"""
|
| 1136 |
+
Runs only intelligence-related steps for a prepared context.
|
| 1137 |
+
This avoids re-running full ingestion pipeline.
|
| 1138 |
+
"""
|
| 1139 |
+
# Reproduce EXACT dependency injection logic from run()
|
| 1140 |
+
intelligence_steps = [
|
| 1141 |
+
GuardrailStep(self.guardrail_agent),
|
| 1142 |
+
ComparisonStep(self.weekly_repo),
|
| 1143 |
+
PersistAnalysisStep(self.analysis_repo),
|
| 1144 |
+
PositioningStep(self.positioning_service, self.recommendation_service, self.goal_progress_service, self.goal_service),
|
| 1145 |
+
GoalTrajectoryStep(self.goal_service, self.goal_trajectory_service),
|
| 1146 |
+
]
|
| 1147 |
+
|
| 1148 |
+
if enable_intelligence:
|
| 1149 |
+
intelligence_steps.extend([
|
| 1150 |
+
IntelligenceStep(
|
| 1151 |
+
self.insights_agent,
|
| 1152 |
+
self.plan_agent,
|
| 1153 |
+
self.brief_service,
|
| 1154 |
+
self.weekly_repo,
|
| 1155 |
+
self.guardrail_arbitration_service,
|
| 1156 |
+
self.positioning_change_service,
|
| 1157 |
+
self.goal_progress_service,
|
| 1158 |
+
self.goal_service
|
| 1159 |
+
),
|
| 1160 |
+
VisualizationStep(self.visualization_agent, self.viz_executor)
|
| 1161 |
+
])
|
| 1162 |
+
|
| 1163 |
+
for step in intelligence_steps:
|
| 1164 |
+
await step.run(context)
|
| 1165 |
+
|
| 1166 |
+
|
| 1167 |
+
async def get_orchestrator(use_RunnerOrchestrator: bool = True):
    """
    Async factory: construct and initialize a RunnerOrchestrator.

    The `use_RunnerOrchestrator` flag is currently ignored — a
    RunnerOrchestrator is always returned (kept for interface compatibility).
    """
    instance = RunnerOrchestrator()
    await instance._initialize()
    return instance
|
src/agents/plan/agent.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, Optional
|
| 2 |
+
import logging
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
from observability import logger as obs_logger
|
| 6 |
+
from observability import components as obs_components
|
| 7 |
+
|
| 8 |
+
from domain.training.agent_models import PlanOutput, WeeklySummary
|
| 9 |
+
from domain.runner.profile import RunnerProfile
|
| 10 |
+
from domain.runner.goal import Goal
|
| 11 |
+
from llm.base import LLMClient
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class PlanAgent:
    """
    Generates a weekly plan using an LLM based on recent mileage and consistency via the provided LLMClient.
    """

    def __init__(self, llm_client: LLMClient):
        # Client abstraction over the concrete LLM provider.
        self.llm_client = llm_client
        # Default (English) system instruction; reloaded per call in run().
        self.instruction = self._load_instruction("en")

    def _load_instruction(self, language: str = "en") -> str:
        """
        Load the language-specific system prompt from disk.

        Falls back to the English prompt, then to a minimal hard-coded
        instruction, so a missing prompt file never breaks plan generation.
        """
        try:
            # Resolve path relative to this file: src/agents/plan/../../prompts/
            base_path = Path(__file__).parent.parent.parent / "prompts"
            filename = f"plan_{language}.txt"
            file_path = base_path / filename

            if not file_path.exists():
                logger.warning(f"Prompt file not found: {file_path}. Falling back to English.")
                file_path = base_path / "plan_en.txt"

            if not file_path.exists():
                logger.error("English prompt file missing!")
                return "You are generating a weekly running plan. Output JSON."

            return file_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.error(f"Error loading prompt for language {language}: {e}")
            return "You are generating a weekly running plan. Output JSON."

    async def run(
        self,
        features: WeeklySummary,
        language: str = "en",
        profile: Optional[RunnerProfile] = None,
        goal: Optional[Goal] = None,
    ) -> Dict[str, Any]:
        """
        Generate a weekly plan and return it as {"plan": <text>}.

        Never raises: LLM failures and unexpected response shapes degrade to a
        localized "could not generate" message.
        """
        with obs_logger.start_span("plan_agent.run", obs_components.AGENT):
            start_time = time.time()
            # Load language-specific instruction
            # NOTE(review): mutates shared agent state per call; concurrent calls
            # in different languages could race — confirm single-request usage.
            self.instruction = self._load_instruction(language)

            # Take most recent week from WeeklySummary
            if features.weekly_km:
                # take most recent week by sorting keys (YYYY-WW)
                weeks = sorted(features.weekly_km.items(), reverse=True)
                recent_km = weeks[0][1]
            else:
                recent_km = 0.0

            cons = features.consistency_score

            # Construct Prompt
            prompt = self._construct_prompt(
                recent_km, cons, language=language, profile=profile, goal=goal
            )

            try:
                # Call LLM via Client
                with obs_logger.start_span("plan_agent.llm", obs_components.AGENT):
                    plan_output = await self.llm_client.generate(
                        prompt, instruction=self.instruction, schema=None, name="plan_agent"
                    )

                # Normalize the response: the client may hand back a PlanOutput,
                # a dict, or a raw string depending on provider behavior.
                result = None
                if isinstance(plan_output, PlanOutput):
                    result = {"plan": plan_output.plan}
                elif isinstance(plan_output, dict):
                    if "plan" in plan_output:
                        if isinstance(plan_output["plan"], str):
                            result = {"plan": plan_output["plan"]}
                        else:
                            # 'plan' present but not a string: serialize whole dict
                            result = {"plan": json.dumps(plan_output)}
                    else:
                        result = {"plan": json.dumps({"plan": plan_output})}
                elif isinstance(plan_output, str):
                    raw = plan_output.strip()
                    result = {"plan": raw}
                else:
                    logger.error(f"Unexpected response type from LLM: {type(plan_output)}")
                    fail_msg = (
                        "Não foi possível gerar o plano neste momento."
                        if language == "pt-BR"
                        else "Could not generate plan at this time."
                    )
                    result = {"plan": fail_msg}

                return result

            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "error",
                    f"Failed to generate plan: {e}",
                    event="error",
                    component=obs_components.AGENT,
                    duration_ms=duration_ms,
                )
                logger.error(f"Failed to generate plan with LLM: {e}", exc_info=True)
                fail_msg = (
                    "Não foi possível gerar o plano neste momento devido a um erro."
                    if language == "pt-BR"
                    else "Could not generate plan at this time due to an error."
                )
                return {"plan": fail_msg}

    def _construct_prompt(
        self,
        recent_km: float,
        consistency: int,
        language: str = "en",
        profile: Optional[RunnerProfile] = None,
        goal: Optional[Goal] = None,
    ) -> str:
        """
        Build the user prompt, including optional profile and goal context.
        """
        is_pt = language == "pt-BR"
        profile_context = ""
        if profile:
            profile_context = "\n**Runner Profile Context:**\n"
            if profile.runner_display_name:
                profile_context += f"- Display Name: {profile.runner_display_name}\n"
            if profile.age:
                profile_context += f"- Age: {profile.age}\n"
            if profile.experience_level:
                profile_context += f"- Experience Level: {profile.experience_level}\n"
            if profile.baseline_weekly_km:
                profile_context += f"- Baseline Weekly KM: {profile.baseline_weekly_km}\n"
            if profile.injury_history_notes:
                profile_context += f"- Injury Notes: {profile.injury_history_notes}\n"

        # Goal context is appended independently of profile presence.
        if goal:
            goal_type_label = goal.type.replace("_", " ").title()
            date_str = goal.target_date.strftime("%Y-%m-%d") if goal.target_date else "N/A"
            profile_context += f"\n**Current Active Goal:**\n"
            profile_context += f"- Type: {goal_type_label}\n"
            profile_context += f"- Target: {goal.target_value} {goal.unit}\n"
            profile_context += f"- Target Date: {date_str}\n"

        if is_pt:
            return f"""
**Perfil do Corredor:**
{profile_context}
- Quilometragem Semanal Recente: {recent_km:.1f} km
- Score de Consistência: {consistency}/100
"""
        return f"""
**Runner Profile:**
{profile_context}
- Recent Weekly Mileage: {recent_km:.1f} km
- Consistency Score: {consistency}/100
"""
|
src/agents/visualization/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .agent import VisualizationAgent
|
| 2 |
+
|
| 3 |
+
__all__ = ["VisualizationAgent"]
|
src/agents/visualization/agent.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Any, Optional
|
| 2 |
+
import logging
|
| 3 |
+
import json
|
| 4 |
+
import time
|
| 5 |
+
from observability import logger as obs_logger
|
| 6 |
+
from observability import components as obs_components
|
| 7 |
+
from agents.base import BaseAgent
|
| 8 |
+
from domain.training.charts import ChartSpec, ChartType
|
| 9 |
+
from llm.base import LLMClient
|
| 10 |
+
from router.models import RouteDecision
|
| 11 |
+
from router.compact import compact_raw_context
|
| 12 |
+
|
| 13 |
+
from pydantic import BaseModel, Field
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class VisualizationAgentOutput(BaseModel):
    """The structured output from the VisualizationAgent."""
    # Ordered chart specifications to render; may be empty. The Field
    # description is part of the schema handed to the LLM — do not edit casually.
    charts: List[ChartSpec] = Field(description="The list of charts to generate.")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class VisualizationAgent(BaseAgent):
    """
    Agent responsible for specifying visualizations from run data using LLM.
    Returns ChartSpec objects instead of executing code.
    """

    def __init__(self, llm_client: LLMClient):
        self.llm_client = llm_client
        # System instruction sent verbatim to the LLM; its JSON contract is
        # mirrored by VisualizationAgentOutput.
        self.instruction = """You are a data visualization expert. Your goal is to specify charts to visualize run data.

Return a JSON object containing a 'charts' list. Each chart in the list must have:
- 'chart_type': One of ['pace', 'heart_rate', 'volume', 'frequency', 'zones', 'dynamic']
- 'title': A descriptive title for the chart.
- 'params': (Optional) A dictionary of parameters.

Chart Types:
- 'pace': Pace vs Date
- 'heart_rate': Heart Rate vs Date
- 'volume': Weekly mileage (distance)
- 'frequency': Number of runs per week
- 'zones': Distribution of Effort/HR zones
- 'dynamic': Custom chart with provided 'labels', 'values', and 'type' (bar/pie/line) in params.

When asked to generate charts:
1. If you need a custom chart (not pace/hr/volume/freq/zones), use 'dynamic' and specify labels and values in 'params'.
Example params for dynamic: {"labels": ["Mon", "Tue"], "values": [5, 10], "type": "bar"}
2. Consider the 'periods' (n_weeks, etc.) provided in the extracted intent.
3. Default to showing the last 4 weeks of data unless 'all_time' or a specific range is requested.
4. Always include 'pace' and 'heart_rate' charts by default if no specific query is provided.
"""

    async def run(
        self,
        features: List[Dict[str, Any]],
        query: str = "",
        intent: Optional[RouteDecision] = None
    ) -> List[ChartSpec]:
        """
        Generates chart specifications using the LLM.

        Returns an empty list on non-schema responses; on exceptions, falls
        back to default pace/heart-rate charts when no query was given.
        """
        with obs_logger.start_span("visualization_agent.run", obs_components.AGENT):
            intent_info = ""
            if intent:
                intent_info = f"\nExtracted Intent: Metric={intent.metric}, Period={intent.period}, Target Date={intent.target_date}"

            if query:
                # Use compact_raw_context for prompt injection as per user feedback
                compacted_features = compact_raw_context({"features": features}, max_elements=25).get("features", [])

                prompt = f"""
The user has requested a specific visualization: "{query}"{intent_info}
There are {len(features)} runs available in history.

**Relevant Data Context (Compacted):**
{json.dumps(compacted_features, default=str, indent=2)}

ACTION: Please specify the most appropriate chart(s) according to the schema.
If they ask for frequency or "by day", use 'frequency'.
"""
            else:
                prompt = f"""
Generate default visualizations for the provided {len(features)} runs.
Please request:
1. A pace chart ('pace')
2. A heart rate chart ('heart_rate')
"""

            try:
                res = await self.llm_client.generate(
                    prompt,
                    instruction=self.instruction,
                    schema=VisualizationAgentOutput,
                    name="visualization_agent",
                )
                logger.info(f"[VIZ DEBUG] LLM raw response type: {type(res)}")
                if isinstance(res, VisualizationAgentOutput):
                    logger.info(f"[VIZ DEBUG] Specs generated: {len(res.charts)}")
                    return res.charts

                logger.warning(f"[VIZ DEBUG] LLM returned non-schema response: {res}")
                return []
            except Exception as e:
                logger.error(f"LLM specification failed: {e}")

            # Fallback if no specs were generated
            # (only reached after an exception — both try branches return)
            if not query:
                return [
                    ChartSpec(chart_type=ChartType.PACE, title="Pace Chart"),
                    ChartSpec(chart_type=ChartType.HEART_RATE, title="Heart Rate Chart"),
                ]
            return []
|
src/application/dto/runner_api_response.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Any, Dict, Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@dataclass
class RunnerAPIResponse:
    """
    Aggregated response payload assembled for the runner-facing API/UI.

    Field order is part of the positional-init contract — do not reorder.
    """
    num_runs: int
    weeks: int
    features: list  # per-run feature records — presumably dicts; confirm against producer
    insights: Any
    plan: Optional[str]  # generated weekly plan text, if any
    risk_assessment: Optional[Dict]
    trends: Any
    period_comparison: Any
    charts: Any
    profile: Optional[Dict]
    goal_progress: Optional[Dict]
    goal_trajectory: Optional[Any]
    positioning: Optional[Dict]
    recommendation: Optional[Dict]
    active_goal: Optional[Dict]

    # ✅ New product surface
    intelligence_snapshot: Optional[Dict]
    mode: str = "RunnerOrchestrator"
|
src/application/goal_trajectory_service.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from domain.goals.goal_trajectory_engine import GoalTrajectoryEngine
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class GoalTrajectoryService:
    """Thin application-layer facade over the domain GoalTrajectoryEngine."""

    def __init__(self):
        # Engine is stateless per this file's usage; one instance per service.
        self.engine = GoalTrajectoryEngine()

    def compute(self, goal, snapshot, trend, runner_profile=None):
        """Delegate goal-trajectory computation to the domain engine."""
        return self.engine.compute(goal, snapshot, trend, runner_profile)
|
src/application/positioning_service.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Dict, List, Literal
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
from datetime import date
|
| 4 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 5 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 6 |
+
from domain.runner_positioning import TrainingPhase
|
| 7 |
+
from _app.presentation.ui_text import get_text
|
| 8 |
+
|
| 9 |
+
class WeeklyPositioning(BaseModel):
    """
    Application-layer model for Positioning Intelligence v1.
    Narrative-focused assessment of the runner's current standing.
    """
    # Discrete training-state classification for the week.
    status: Literal["CONSTRUCTIVE_ADAPTATION", "PRODUCTIVE_LOAD", "STRAIN", "PLATEAU", "BASELINE_BUILDING"]
    signal_strength: float = 1.0  # 0.0 to 1.0
    # Holds an i18n rationale key (e.g. "positioning_rationale_strain") rather
    # than final display text — see PositioningEngine.compute.
    rationale: str
    training_phase: TrainingPhase = TrainingPhase.BASE

    @property
    def status_value(self) -> str:
        """Convenience accessor returning the raw status literal."""
        return self.status
|
| 22 |
+
|
| 23 |
+
class PositioningEngine:
    """
    Engine to compute high-level positioning status from snapshots and trends.
    """

    def detect_training_phase(self, snapshot: WeeklySnapshot, trend: WeeklyTrend) -> TrainingPhase:
        """
        Infer training phase from workload and trend signals.
        """
        # None-safe deltas: missing metrics are treated as "no change".
        distance_delta = trend.distance_delta_pct or 0
        pace_delta = trend.pace_delta_s_per_km or 0
        run_count = snapshot.run_count
        # consistency = snapshot.consistency_score or 0

        # Recovery phase (very low activity and sharp drop in load)
        if run_count <= 1 and distance_delta < -80:
            return TrainingPhase.RECOVERY

        # Plateau phase (load dropped or adaptation stalled)
        if distance_delta < -20:
            return TrainingPhase.PLATEAU

        # Build phase (clear volume increase)
        if distance_delta > 5:
            return TrainingPhase.BUILD

        # Peak phase (performance improving without load reduction)
        if pace_delta < -5 and distance_delta >= 0:
            return TrainingPhase.PEAK

        # Default: base training
        return TrainingPhase.BASE

    def compute(self, snapshot: WeeklySnapshot, trend: WeeklyTrend) -> WeeklyPositioning:
        """
        Map snapshot + trend onto a WeeklyPositioning (v1 heuristic).

        Without a usable comparison we report BASELINE_BUILDING with weak
        signal; otherwise the delta heuristics below classify the week.
        """
        if not trend or not trend.comparison_available:
            return WeeklyPositioning(
                status="BASELINE_BUILDING",
                rationale="positioning_rationale_building",
                signal_strength=0.1,
                training_phase=TrainingPhase.BASE
            )

        # FIX: None-safe deltas, consistent with detect_training_phase — the
        # previous comparisons raised TypeError when a delta was None.
        distance_delta = trend.distance_delta_pct or 0
        consistency_delta = trend.consistency_delta or 0
        hr_delta = trend.hr_delta or 0

        # 1. Logic to determine status — v1 heuristic mapping.
        # FIX: dropped the redundant `if trend.comparison_available:` wrapper;
        # the early return above guarantees it is true on this path.
        if distance_delta > 5.0 and consistency_delta >= -5.0:
            # Constructive Adaptation: Increasing load with positive/stable consistency
            status = "CONSTRUCTIVE_ADAPTATION"
            rationale_key = "positioning_rationale_constructive_adaptation"
        elif distance_delta > 20.0 or hr_delta > 5.0:
            # Strain: High load increase or high HR increase
            status = "STRAIN"
            rationale_key = "positioning_rationale_strain"
        elif distance_delta >= -5.0:
            # Productive Load: Stable or slightly increasing load
            status = "PRODUCTIVE_LOAD"
            rationale_key = "positioning_rationale_productive_load"
        else:
            status = "BASELINE_BUILDING"
            rationale_key = "positioning_rationale_building"

        training_phase = self.detect_training_phase(snapshot, trend)

        return WeeklyPositioning(
            status=status,
            rationale=rationale_key,  # Store key here (resolved to text by the view layer)
            signal_strength=1.0,  # comparison is available on this path
            training_phase=training_phase
        )
|
| 94 |
+
|
| 95 |
+
def build_positioning_view(
    snapshot: WeeklySnapshot,
    trend: WeeklyTrend,
    positioning: WeeklyPositioning,
    goal_progress: Optional[dict] = None,
    language: str = "en"
) -> dict:
    """
    Builds a UI-ready positioning view using BOTH absolute state (snapshot)
    and relative changes (trend). This restores coaching-quality output.

    Returns a dict with headline, state, health signal, goal trajectory,
    training phase, forward focus, narrative insight and an evidence dict
    (evidence is suppressed while the runner is still baseline-building).
    """
    # Fix: the caller-supplied `positioning` was previously discarded and
    # unconditionally recomputed. Only compute it when the caller passed None.
    if positioning is None:
        positioning = PositioningEngine().compute(snapshot, trend)

    # --- Absolute context (snapshot) ---
    total_distance = snapshot.total_distance_km or 0
    run_count = snapshot.run_count or 0

    # --- Relative context (trend) ---
    distance_delta = trend.distance_delta_pct or 0
    pace_delta = trend.pace_delta_s_per_km or 0
    hr_delta = trend.hr_delta or 0
    freq_delta = int(trend.frequency_delta or 0)
    consistency_delta = trend.consistency_delta or 0

    # --- Headline (context-aware) ---
    if run_count <= 1:
        headline = get_text("positioning_headline_building", language)
    elif total_distance < 5:
        # NOTE(review): this branch is not localized, unlike every other
        # user-facing string here — consider adding an i18n key.
        headline = f"Your training volume is currently low at {total_distance:.1f} km."
    else:
        status_lower = positioning.status.lower()
        headline = get_text(f"positioning_headline_{status_lower}", language)

    # --- State: traffic-light icon + localized status name ---
    status_icons = {
        "CONSTRUCTIVE_ADAPTATION": "🟢",
        "PRODUCTIVE_LOAD": "🟡",
        "STRAIN": "🔴",
        "PLATEAU": "⚪",
        "BASELINE_BUILDING": "⚪",
    }
    status_icon = status_icons.get(positioning.status, "⚪")
    status_name = get_text(f"positioning_status_{positioning.status.lower()}", language)

    # --- Health: an HR rise of more than 5 bpm is treated as strain ---
    if hr_delta > 5:
        health_signal = f"🔴 {get_text('health_strain', language)}"
    else:
        health_signal = f"🟢 {get_text('health_stable', language)}"

    # --- Goal trajectory ---
    if goal_progress:
        if positioning.status == "BASELINE_BUILDING":
            goal_trajectory = get_text("trajectory_establishing", language)
        elif positioning.status == "CONSTRUCTIVE_ADAPTATION":
            goal_trajectory = f"🎯 {get_text('trajectory_improving', language)}"
        else:
            goal_trajectory = f"🎯 {get_text('trajectory_maintaining', language)}"
    else:
        goal_trajectory = get_text("trajectory_no_goal", language)

    # --- Forward focus ---
    forward_focus = get_text(f"positioning_forward_focus_{positioning.status.lower()}", language)

    # --- Narrative insight (positioning.rationale holds an i18n key) ---
    insight = get_text(positioning.rationale, language)

    # --- Evidence (enriched with absolute + relative) ---
    evidence = {
        "distance": f"{total_distance:.1f} km ({distance_delta:+.1f}%)",
        "pace": f"{pace_delta:+.1f} sec/km" if pace_delta else None,
        "hr": f"{hr_delta:+.1f} bpm" if hr_delta else None,
        "frequency": f"{run_count} runs ({freq_delta:+d})",
        "consistency": f"{int(consistency_delta):+d}",
    }

    # Remove empty evidence fields
    evidence = {k: v for k, v in evidence.items() if v is not None}

    return {
        "headline": headline,
        "state": f"{status_icon} {status_name}",
        "health_signal": health_signal,
        "goal_trajectory": goal_trajectory,
        "training_phase": positioning.training_phase.value if hasattr(positioning.training_phase, "value") else positioning.training_phase,
        "forward_focus": forward_focus,
        "trajectory": get_text(f"positioning_trajectory_{positioning.status.lower()}", language),
        "insight": insight,
        "evidence": evidence if positioning.status != "BASELINE_BUILDING" else None,
    }
|
src/application/recommendation_service.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Any
|
| 2 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 3 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 4 |
+
from domain.runner_positioning import RunnerPositioning, TrainingPhase
|
| 5 |
+
from domain.training.training_recommendation import TrainingRecommendation
|
| 6 |
+
from _app.presentation.ui_text import get_text
|
| 7 |
+
|
| 8 |
+
class RecommendationService:
    """
    Stateless service to generate training recommendations based on positioning.
    """

    def generate(
        self,
        snapshot: WeeklySnapshot,
        trend: WeeklyTrend,
        positioning: Any,  # Can be RunnerPositioning or WeeklyPositioning
        language: str = "en"
    ) -> TrainingRecommendation:
        """
        Pure rule-based logic to map positioning to training focus and session types.

        `snapshot` and `trend` are currently unused but kept for interface
        stability (future rules may consume them).
        """
        # Handle both Domain (RunnerPositioning) and Application (WeeklyPositioning) models.
        if hasattr(positioning, "recommended_focus"):
            focus_val = positioning.recommended_focus
        else:
            # Fallback for WeeklyPositioning (Application Layer): derive the
            # focus from the positioning status.
            status_map = {
                "CONSTRUCTIVE_ADAPTATION": "INTENSITY",
                "PRODUCTIVE_LOAD": "CONSISTENCY",
                "STRAIN": "RECOVERY",
                "PLATEAU": "MAINTENANCE"
            }
            focus_val = status_map.get(getattr(positioning, "status", ""), "MAINTENANCE")

        # Fix: the original computed `focus_val.lower()` into an unused variable
        # and looked the mapping up with the raw value, so a lowercase
        # `recommended_focus` (e.g. "recovery") silently fell through to the
        # generic fallback. Normalize to uppercase to match the mapping keys.
        focus_key = str(focus_val).upper() if focus_val else "MAINTENANCE"

        # Recommendation mapping as per requirements:
        # Building Momentum (CONSTRUCTIVE_ADAPTATION) -> introduce_intensity / tempo_intervals
        # Maintaining Consistency (PRODUCTIVE_LOAD) -> build_endurance / long_run
        # Recovery (STRAIN) -> protect_recovery / easy_run
        # Default -> maintain_consistency
        mapping = {
            "RECOVERY": {
                "focus": "protect_recovery",
                "session_type": "easy_run",
                "confidence": 0.9
            },
            "CONSISTENCY": {
                "focus": "build_endurance",
                "session_type": "long_run",
                "confidence": 0.8
            },
            "INTENSITY": {
                "focus": "introduce_intensity",
                "session_type": "tempo_intervals",
                "confidence": 0.85
            },
            "MAINTENANCE": {
                "focus": "maintain_consistency",
                "session_type": "steady_run",
                "confidence": 0.75
            }
        }

        rec_data = mapping.get(focus_key, {
            "focus": "maintain_consistency",
            "session_type": "steady_run",
            "confidence": 0.5
        })

        # Resolve localized description via i18n keys.
        description = get_text(f"rec_desc_{rec_data['focus']}", language)

        return TrainingRecommendation(
            focus=get_text(f"rec_focus_{rec_data['focus']}", language),
            session_type=get_text(f"rec_session_{rec_data['session_type']}", language),
            description=description,
            confidence=rec_data["confidence"]
        )
|
src/application/runner_positioning_service.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Dict, Any
|
| 2 |
+
from domain.runner_positioning import RunnerPositioning
|
| 3 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 4 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 5 |
+
from application.positioning_service import PositioningEngine
|
| 6 |
+
|
| 7 |
+
class RunnerPositioningService:
    """Application service for Runner Positioning.

    Enforces architectural boundaries: no LLM, no persistence, no
    observability at this layer — pure aggregation over domain logic.
    """

    def generate(
        self,
        snapshot: WeeklySnapshot,
        trend: WeeklyTrend,
        goal_progress: Optional[Dict[str, Any]] = None
    ) -> RunnerPositioning:
        """Aggregate domain data into a RunnerPositioning assessment."""
        # `goal_progress` is the dict produced by goal_service.compute_goal_progress;
        # when present it may carry a quantitative 'target_value'.
        target_distance = goal_progress.get("target_value") if goal_progress else None

        # Delegate phase detection to the shared engine.
        phase = PositioningEngine().detect_training_phase(snapshot, trend)

        return RunnerPositioning.compute(
            week_start=snapshot.week_start_date,
            total_distance=snapshot.total_distance_km,
            target_distance=target_distance,
            consistency_score=snapshot.consistency_score,
            pace_delta=trend.pace_delta_s_per_km,
            hr_delta=trend.hr_delta,
            distance_delta_pct=trend.distance_delta_pct,
            comparison_available=trend.comparison_available,
            training_phase=phase
        )
|
src/config.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import uuid
|
| 3 |
+
|
| 4 |
+
# Application Constants
APP_NAME = "agents"
USER_ID = "default_user"
# Sentinel runner id used when no authenticated runner exists (nil UUID).
DEFAULT_RUNNER_ID = "00000000-0000-0000-0000-000000000000"

# Environment Flags
# True when deployed as a HuggingFace Space; driven by the HF_SPACE env var.
HF_SPACE = os.getenv("HF_SPACE", "false").lower() == "true"
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def is_hf_space() -> bool:
    """Return True when running inside a HuggingFace Space.

    Reads the module-level HF_SPACE flag, which is resolved once at import
    time from the HF_SPACE environment variable.
    """
    return HF_SPACE
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Ingestion Limits
# Tighter defaults for the public HF Space demo than for local/self-hosted runs.
# Kept as strings because they flow through the same _safe_int parsing as
# the environment-variable overrides below.
_DEFAULT_MAX_FILES = "12" if is_hf_space() else "100"
_DEFAULT_MAX_FILE_SIZE = "10" if is_hf_space() else "50"  # MB per file
_DEFAULT_MAX_TOTAL_SIZE = "20" if is_hf_space() else "250"  # MB per upload batch
|
| 22 |
+
|
| 23 |
+
def _safe_int(value: str, default: int) -> int:
|
| 24 |
+
"""Safely convert string to int, stripping common corrupted characters like '§'."""
|
| 25 |
+
if not value:
|
| 26 |
+
return default
|
| 27 |
+
try:
|
| 28 |
+
# Strip common non-numeric suffixes that might appear due to env corruption
|
| 29 |
+
clean_val = ""
|
| 30 |
+
for char in str(value):
|
| 31 |
+
if char.isdigit():
|
| 32 |
+
clean_val += char
|
| 33 |
+
else:
|
| 34 |
+
break
|
| 35 |
+
return int(clean_val) if clean_val else default
|
| 36 |
+
except (ValueError, TypeError):
|
| 37 |
+
return default
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Upload limits: env overrides win, otherwise the HF-aware defaults above.
MAX_UPLOAD_FILES = _safe_int(os.getenv("MAX_UPLOAD_FILES"), int(_DEFAULT_MAX_FILES))
MAX_UPLOAD_FILE_SIZE_MB = _safe_int(os.getenv("MAX_UPLOAD_FILE_SIZE_MB"), int(_DEFAULT_MAX_FILE_SIZE))
MAX_UPLOAD_TOTAL_SIZE_MB = _safe_int(os.getenv("MAX_UPLOAD_TOTAL_SIZE_MB"), int(_DEFAULT_MAX_TOTAL_SIZE))
# Guards against gzip decompression bombs during ingestion.
MAX_GZIP_DECOMPRESSED_SIZE_MB = _safe_int(os.getenv("MAX_GZIP_DECOMPRESSED_SIZE_MB"), 200)
# Comma-separated allow-list; includes compound suffixes like ".tcx.gz".
ALLOWED_UPLOAD_EXTENSIONS = os.getenv(
    "ALLOWED_UPLOAD_EXTENSIONS", ".gpx,.tcx.gz,.tcx,.fit,.fit.gz"
).split(",")

# Storage Configuration
# Persistence defaults to OFF on the public HF Space, ON elsewhere.
_DEFAULT_STORAGE_ENABLED = "false" if is_hf_space() else "true"
STORAGE_ENABLED = os.getenv("STORAGE_ENABLED", _DEFAULT_STORAGE_ENABLED).lower() == "true"
STORAGE_DB_PATH = os.getenv("STORAGE_DB_PATH", ".data/runner_agentic_intelligence.db")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def is_storage_enabled() -> bool:
    """Return True when persistence is enabled via STORAGE_ENABLED.

    Note: this is the raw flag; use is_persistence_enabled() to also exclude
    the HF public preview.
    """
    return STORAGE_ENABLED
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def is_persistence_enabled() -> bool:
    """Return True only when storage is enabled and this is not the HF public preview."""
    if is_hf_space():
        # The public HF Space preview must never persist user data.
        return False
    return is_storage_enabled()
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def generate_session_id() -> str:
    """Return a fresh random session identifier (canonical UUID4 string form)."""
    return f"{uuid.uuid4()}"
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# LLM Configuration
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "gemini")  # "gemini", "mock", "litellm"
LLM_MODEL_ID = os.getenv("LLM_MODEL", "")
LLM_API_BASE = os.getenv("LLM_API_BASE", None)
LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "1.0"))
LLM_MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "8192"))
LLM_DROP_PARAMS = True
LLM_SCHEMA_GUARD_STRICT = os.getenv("LLM_SCHEMA_GUARD_STRICT", "false").lower() == "true"
AI_INSIGHTS_ENABLED = os.getenv("AI_INSIGHTS_ENABLED", "true").lower() == "true"

# LiteLLM / OpenAI compatible settings
# All specific providers are now handled by the LiteLLM client layer.
if LLM_PROVIDER.lower() == "litellm":
    # Ensure we use LLM_MODEL env var if provided first
    # NOTE(review): redundant — LLM_MODEL_ID above is already initialized from
    # the same LLM_MODEL env var; this re-read never changes the value.
    ENV_MODEL = os.getenv("LLM_MODEL")
    if ENV_MODEL:
        LLM_MODEL_ID = ENV_MODEL

    if not LLM_MODEL_ID:
        # Default model if not provided
        LLM_MODEL_ID = "openai/gpt-4o-mini"

    # Simple heuristic to distinguish between OpenAI proper and others (like HF Router)
    is_real_openai = LLM_MODEL_ID.startswith("openai/") and "gpt-oss" not in LLM_MODEL_ID

    if is_real_openai:
        # gpt-5-family models only accept the default temperature.
        if "gpt-5" in LLM_MODEL_ID:
            LLM_TEMPERATURE = 1.0
    else:
        # Default to HuggingFace Router for other LiteLLM models unless api_base is set
        if not LLM_API_BASE:
            LLM_API_BASE = "https://router.huggingface.co/v1"
else:
    # Default fallback for Gemini or other non-explicit LiteLLM provider
    LLM_MODEL_ID = os.getenv("LLM_MODEL") or os.getenv("GEMINI_MODEL", "gemini-3-flash-preview")

# Deprecated: do not use global LLM_API_KEY for multi-provider setup.
# LLM clients now resolve their own credentials from environment variables.
LLM_API_KEY = os.getenv("LLM_API_KEY")
|
src/core/intelligence/intelligence_builder.py
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from typing import Dict, Optional, Any
|
| 3 |
+
from .runner_intelligence_snapshot import RunnerIntelligenceSnapshot
|
| 4 |
+
from _app.presentation.ui_text import UI_TEXT
|
| 5 |
+
from tools.helpers import decode_chart
|
| 6 |
+
|
| 7 |
+
def _format_pace(seconds_per_km: float) -> str:
|
| 8 |
+
if not seconds_per_km or seconds_per_km <= 0:
|
| 9 |
+
return "N/A"
|
| 10 |
+
minutes = int(seconds_per_km // 60)
|
| 11 |
+
seconds = int(seconds_per_km % 60)
|
| 12 |
+
return f"{minutes}:{seconds:02d} /km"
|
| 13 |
+
|
| 14 |
+
def _build_performance_story(snapshot: RunnerIntelligenceSnapshot, trend_dict: Dict, language: str) -> str:
    """Compose the localized "this week" narrative for the home view.

    Starts from the home story template and, when a week-over-week comparison
    exists, appends distance and pace delta sentences for deltas large enough
    to matter (>5% distance, >2 s/km pace).
    """
    labels = UI_TEXT.get(language, UI_TEXT["en"])

    story = labels.get("home_story_template", "").format(
        count=snapshot.run_count,
        unit=labels.get("unit_runs"),
        of=labels.get("lbl_total_of"),
        dist=snapshot.weekly_distance_km,
    )

    # Without a previous period there is nothing to compare against.
    if not (trend_dict and trend_dict.get("comparison_available", False)):
        return story

    dist_delta = trend_dict.get("distance_delta_pct", 0)
    pace_delta = trend_dict.get("pace_delta_s_per_km", 0)

    if abs(dist_delta) > 5:
        key, fallback = ("lbl_more", "more") if dist_delta > 0 else ("lbl_less", "less")
        dist_trend = labels.get(key, fallback)
        that_is = labels.get("lbl_that_is")
        than_avg = labels.get("lbl_than_avg")
        story += f" {that_is} **{abs(dist_delta):.1f}% {dist_trend}** {than_avg}"

    if abs(pace_delta) > 2:
        # Negative pace delta means faster (fewer seconds per km).
        key, fallback = ("lbl_faster", "faster") if pace_delta < 0 else ("lbl_slower", "slower")
        pace_trend = labels.get(key, fallback)
        pace_was = labels.get("lbl_pace_was")
        formatted_delta = _format_pace(abs(pace_delta)).replace(" /km", "")
        story += f" {pace_was} **{formatted_delta} {pace_trend}**."

    return story
|
| 42 |
+
|
| 43 |
+
def _build_delta_summary(trend_dict: Dict, language: str) -> Dict[str, str]:
    """Format week-over-week deltas as icon-prefixed display strings.

    Returns {} when no comparison data is available. Each value is
    "<traffic-light icon> <signed value + unit>"; green/red depends on
    whether an increase is desirable for that metric.
    """
    if not trend_dict or not trend_dict.get("comparison_available"):
        return {}

    t = UI_TEXT.get(language, UI_TEXT["en"])

    def format_val(metric_name, delta_val):
        # None means the metric could not be computed this week.
        if delta_val is None:
            return t.get("na", "N/A")

        # Increases in distance/frequency/consistency are good (green);
        # increases in pace (s/km) and HR are bad (red). Zero stays neutral.
        is_positive_good = metric_name in ["distance", "frequency", "consistency"]
        icon = "⚪"
        if delta_val > 0:
            icon = "🟢" if is_positive_good else "🔴"
        elif delta_val < 0:
            icon = "🔴" if is_positive_good else "🟢"

        # Metric-specific precision and localized unit.
        formatted = f"{delta_val:+.1f}"
        if metric_name == "distance":
            formatted = f"{delta_val:+.1f}%"
        elif metric_name == "pace":
            unit = t.get("unit_spkm", "s/km")
            formatted = f"{delta_val:+.1f} {unit}"
        elif metric_name == "frequency":
            unit = t.get("unit_runs", "runs")
            formatted = f"{int(delta_val):+d} {unit}"
        elif metric_name == "hr":
            unit = t.get("unit_bpm", "bpm")
            formatted = f"{delta_val:+.1f} {unit}"
        elif metric_name == "consistency":
            unit = t.get("unit_pts", "pts")
            formatted = f"{int(delta_val):+d} {unit}"

        return f"{icon} {formatted}"

    return {
        "distance": format_val("distance", trend_dict.get("distance_delta_pct")),
        "pace": format_val("pace", trend_dict.get("pace_delta_s_per_km")),
        "frequency": format_val("frequency", trend_dict.get("frequency_delta")),
        "hr": format_val("hr", trend_dict.get("hr_delta")),
        "consistency": format_val("consistency", trend_dict.get("consistency_delta"))
    }
|
| 85 |
+
|
| 86 |
+
def _build_evidence_view(positioning_view: Dict, trend: Dict, language: str) -> str:
    """Render the positioning evidence as an HTML fragment.

    Falls back to the plain trajectory text when no evidence dict is present
    (e.g. during BASELINE_BUILDING, where the view suppresses evidence).
    """
    t = UI_TEXT.get(language, UI_TEXT["en"])
    evidence = positioning_view.get("evidence", "")
    if evidence and isinstance(evidence, dict):
        # NOTE(review): other code reads 'pace_delta_s_per_km' from trend
        # dicts; this key 'pace_trend_s_per_km' may be a mismatch that makes
        # pace_delta always 0 (so pace always reads "Improved") — confirm
        # against the trends model's actual field names.
        pace_delta = trend.get('pace_trend_s_per_km') if trend else 0
        if pace_delta is None: pace_delta = 0
        pace_word = t.get('positioning_evidence_pace_improved', 'Improved') if pace_delta <= 0 else t.get('positioning_evidence_pace_worsened', 'Worsened')

        return f"""
📈 {t.get('positioning_evidence_distance', 'Dist')}: {evidence.get('distance', 0)} <br>
⚡ {pace_word}: {evidence.get("pace")} <br>
🫀 {t.get('positioning_evidence_hr', 'HR')}: {evidence.get('hr', 0)} <br>
🏃 {t.get('lbl_runs_count', 'Runs')}: {evidence.get("frequency")} <br>
🎯 {t.get('positioning_evidence_consistency', 'Consistency')}: {evidence.get('consistency', 0)}
""".strip()
    else:
        return f"{positioning_view.get('trajectory', '')}\n".strip()
|
| 103 |
+
|
| 104 |
+
def _build_structure_view(structure_status: Dict, recommendation: Dict, language: str) -> str:
    """Render the weekly structure status (weekday runs, long run, remaining km)
    plus an optional coaching tip as an HTML fragment. Returns "" when no
    structure data is available.
    """
    if not structure_status:
        return ""

    t = UI_TEXT.get(language, UI_TEXT["en"])
    wd_comp = structure_status.get("weekday_completed", 0)
    wd_total = structure_status.get("weekday_total", 0)
    # Checkmark when the long run is done, hourglass while pending.
    lr_comp = "✅" if structure_status.get("long_run_completed") else "⏳"
    classif = structure_status.get("classification", "reset_week")
    classif_lbl = t.get(classif, classif)
    km_rem = structure_status.get("km_remaining", 0.0)

    # Only show the remaining-km subtext when there is distance left to cover.
    km_rem_subtext = ""
    if km_rem > 0:
        subtext_tpl = t.get("lbl_km_remaining_subtext", "{val} km")
        km_rem_subtext = f'<span class="subtext">{subtext_tpl.format(val=f"{km_rem:.1f}")}</span>'

    # Prefer the recommendation's description; otherwise the generic coaching tip.
    advice = recommendation.get('description', "") if recommendation else t.get("coaching_advice", "")
    advice_html = f'<div class="coaching-tip">{advice}</div>' if advice else ""

    return f"""
<div class="metric-row"><span class="metric-label">{t.get('lbl_weekday_runs', 'Weekday')}:</span> <span class="metric-value">{wd_comp} / {wd_total}</span></div>
<div class="metric-row"><span class="metric-label">{t.get('lbl_long_run', 'Long Run')}:</span> <span class="metric-value">{lr_comp}</span></div>
<div class="metric-row"><span class="metric-label">{t.get('lbl_structure_status', 'Status')}:</span> <span class="metric-value">{classif_lbl}</span></div>
<div class="metric-row"><span class="metric-label">{t.get('lbl_km_remaining', 'Remaining') + ': ' if km_rem > 0 else ''}</span> {km_rem_subtext}</div>
{advice_html}
""".strip()
|
| 131 |
+
|
| 132 |
+
def _build_goal_status_text(active_goal: Dict, language: str) -> str:
    """Render the localized goal-status line, or "" when no goal is active."""
    if not active_goal:
        return ""

    labels = UI_TEXT.get(language, UI_TEXT["en"])
    raw_status = active_goal.get("status", "unknown")
    localized_status = labels.get(f"goal_status_{raw_status}", raw_status)
    template = labels.get("goal_status_template", "Goal status: {val}")
    return template.format(val=localized_status)
|
| 141 |
+
|
| 142 |
+
def build_intelligence_snapshot(context) -> RunnerIntelligenceSnapshot:
    """
    Builds a RunnerIntelligenceSnapshot from a PipelineContext.

    This is an aggregation layer only. It uses safe accessors (`getattr`)
    to extract already-computed values without introducing new business logic.

    Precedence for each signal: recommendation/positioning_view objects first,
    then plain attributes on the context (test/minimal-context fallbacks).
    """
    summary = context.summary

    # Helper to safely extract depending on whether summary is a dict, WeeklySnapshot, or WeeklySummary
    def _extract_summary_val(dict_key, attr_names, default, transform=None):
        if not summary:
            return default

        val = default
        if isinstance(summary, dict):
            val = summary.get(dict_key, default)
        else:
            # Try each candidate attribute name in order (models differ).
            for attr in attr_names:
                if hasattr(summary, attr):
                    val = getattr(summary, attr, default)
                    break

        # NOTE(review): `transform` is skipped when val equals the default,
        # even if the default was read from the data itself — confirm intended.
        return transform(val) if transform and val != default else val

    # --- Extract signals from domain objects (projection layer) ---
    recommendation_obj = context.recommendation
    insights_obj = context.insights or {}

    training_state = None
    health_signal = None
    positioning_status = None
    positioning_change = None

    next_run = None
    training_focus = None
    training_type = None
    training_why = None

    performance_brief = None
    performance_focus = None

    if recommendation_obj:
        training_focus = getattr(recommendation_obj, "focus", None)
        training_type = getattr(recommendation_obj, "session_type", None)
        training_why = getattr(recommendation_obj, "description", None)

    if not next_run: next_run = getattr(context, "next_run", None)
    if not training_focus: training_focus = getattr(context, "training_focus", None)

    key_insight = None
    forward_focus = None
    goal_trajectory = None
    goal_progress_pct = None
    positioning_view = None
    active_goal = None
    goal_view = None

    # Positioning view (a dict) is the primary source for display signals.
    if getattr(context, "positioning_view", None):
        positioning_view = context.positioning_view
        training_state = positioning_view.get("training_phase", None)
        health_signal = positioning_view.get("health_signal", None)
        positioning_status = positioning_view.get("state", None)
        positioning_change = positioning_view.get("change", None)
        forward_focus = positioning_view.get("forward_focus", None)
        key_insight = positioning_view.get("insight", None)
        goal_trajectory = positioning_view.get("goal_trajectory", None)
        goal_progress_pct = positioning_view.get("goal_progress_pct", None)

    if hasattr(context, "weekly_snapshot"):
        weekly_snapshot = context.weekly_snapshot
        performance_brief = getattr(weekly_snapshot, "performance_brief", None)
        performance_focus = getattr(weekly_snapshot, "performance_focus", None)
    else:
        performance_brief = getattr(context, "weekly_brief", None)
        performance_focus = getattr(context, "weekly_focus", None)

    # Fallbacks for scalar signals directly on context (useful for tests/minimal contexts)
    # This aligns with the "aggregation layer" philosophy of the builder.
    if not key_insight: key_insight = getattr(context, "key_insight", None)
    if not forward_focus: forward_focus = getattr(context, "forward_focus", None)
    if not training_state: training_state = getattr(context, "training_state", None)
    if not health_signal: health_signal = getattr(context, "health_signal", None)
    if not positioning_status: positioning_status = getattr(context, "positioning_status", None)
    if not positioning_change: positioning_change = getattr(context, "positioning_change", None)

    # Goal trajectory: positioning view -> context attribute -> "NO_GOAL".
    pos_view_safe = getattr(context, "positioning_view", None) or {}
    if not goal_trajectory:
        goal_trajectory = pos_view_safe.get("goal_trajectory") if isinstance(pos_view_safe, dict) else getattr(pos_view_safe, "goal_trajectory", None)
    if not goal_trajectory:
        goal_trajectory = getattr(context, "goal_trajectory", "NO_GOAL")

    goal_prog_safe = getattr(context, "goal_progress", None) or {}
    if goal_progress_pct is None:
        goal_progress_pct = getattr(context, "goal_progress_pct", None)
    if goal_progress_pct is None:
        goal_progress_pct = goal_prog_safe.get("progress_percentage", 0) if isinstance(goal_prog_safe, dict) else getattr(goal_prog_safe, "progress_percentage", 0)

    # NOTE(review): unlike every other access here, `context.weekly_snapshot`
    # is dereferenced without a default — this raises AttributeError on a
    # context lacking that attribute (and only runs when context.charts is
    # falsy). Confirm weekly_snapshot is always present at this point.
    week_charts = getattr(context, "charts", {}) or getattr(context.weekly_snapshot, "charts", {})

    snapshot = RunnerIntelligenceSnapshot(
        id=getattr(context.weekly_snapshot, "id", None),
        week_start=_extract_summary_val("week_start", ["week_start_date", "week_start"], None),

        training_state=training_state,
        health_signal=health_signal,

        positioning_status=positioning_status,
        positioning_change=positioning_change,

        goal_trajectory=goal_trajectory,
        goal_progress_pct=goal_progress_pct,

        next_run=next_run,
        training_focus=training_focus,
        training_type=training_type,
        training_why=training_why,

        key_insight=key_insight,
        forward_focus=forward_focus,

        performance_brief=performance_brief,
        performance_focus=performance_focus,

        # Dict summaries store metres; model summaries already store km,
        # hence the conditional /1000 transform.
        weekly_distance_km=_extract_summary_val(
            "total_distance_m",
            ["total_distance_km", "total_distance_m"],
            0.0,
            transform=lambda x: x / 1000.0 if not hasattr(summary, "total_distance_km") else x
        ),
        num_runs=_extract_summary_val("num_runs", ["run_count", "num_runs"], 0),
        run_count=_extract_summary_val("num_runs", ["run_count", "num_runs"], 0),
        consistency_score=_extract_summary_val("consistency_score", ["consistency_score"], 0),
        avg_pace=_extract_summary_val("avg_pace_s_per_km", ["avg_pace_sec_per_km", "avg_pace_s_per_km"], 0.0),
        avg_hr=_extract_summary_val("avg_hr_bpm", ["avg_hr", "avg_hr_bpm"], 0.0),
        structure_status=getattr(context.weekly_snapshot, "structure_status", {}) if context.weekly_snapshot else {},

        # Detailed DTO components for UI transparency.
        # Each follows the same pattern: model_dump() when the context holds a
        # pydantic-style model, pass-through when it's already a dict, {} when absent.
        # Week specific trend
        trend=context.trends.model_dump() if getattr(context, "trends", None) and hasattr(context.trends, "model_dump") else (context.trends if getattr(context, "trends", None) else {}),
        # Week over Weeks trend
        weekly_trend=context.weekly_trend.model_dump() if getattr(context, "weekly_trend", None) and hasattr(context.weekly_trend, "model_dump") else (context.weekly_trend if getattr(context, "weekly_trend", None) else {}),
        positioning=context.positioning.model_dump() if getattr(context, "positioning", None) and hasattr(context.positioning, "model_dump") else (context.positioning if getattr(context, "positioning", None) else {}),
        positioning_view=context.positioning_view.model_dump() if getattr(context, "positioning_view", None) and hasattr(context.positioning_view, "model_dump") else (context.positioning_view if getattr(context, "positioning_view", None) else {}),
        goal_trajectory_data=context.goal_trajectory.model_dump() if getattr(context, "goal_trajectory", None) and hasattr(context.goal_trajectory, "model_dump") else (context.goal_trajectory if getattr(context, "goal_trajectory", None) else {}),
        insights=context.insights.model_dump() if getattr(context, "insights", None) and hasattr(context.insights, "model_dump") else (context.insights if getattr(context, "insights", None) else {}),
        plan=getattr(context, "plan", None),
        recommendation=context.recommendation.model_dump() if getattr(context, "recommendation", None) and hasattr(context.recommendation, "model_dump") else (context.recommendation if getattr(context, "recommendation", None) else {}),
        charts=decode_chart(week_charts),
        weekly_brief=performance_brief,
        weekly_focus=performance_focus,
        weekly_snapshot = context.weekly_snapshot if getattr(context, "weekly_snapshot", None) and hasattr(context.weekly_snapshot, "model_dump") else (context.weekly_snapshot if getattr(context, "weekly_snapshot", None) else {}),
        active_goal = context.active_goal.model_dump() if getattr(context, "active_goal", None) and hasattr(context.active_goal, "model_dump") else (context.active_goal if getattr(context, "active_goal", None) else {}),
        goal_view=context.goal_progress.model_dump() if getattr(context, "goal_progress", None) and hasattr(context.goal_progress, "model_dump") else (context.goal_progress if getattr(context, "goal_progress", None) else {}),
    )

    language = getattr(context, "language", "en")

    # Derived display strings are attached after construction.
    snapshot.performance_story = _build_performance_story(snapshot, snapshot.weekly_trend, language)
    snapshot.delta_summary = _build_delta_summary(snapshot.weekly_trend, language)
    snapshot.evidence_view = _build_evidence_view(snapshot.positioning_view, snapshot.trend, language)
    snapshot.structure_view = _build_structure_view(snapshot.structure_status, snapshot.recommendation, language)
    snapshot.goal_status_text = _build_goal_status_text(snapshot.active_goal, language)

    return snapshot
|
| 308 |
+
|
src/core/intelligence/intelligence_serializer.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tools.helpers import serialize_charts
|
| 2 |
+
|
| 3 |
+
def serialize_snapshot(snapshot):
    """Flatten a RunnerIntelligenceSnapshot into a JSON-serializable dict.

    Returns None for a falsy snapshot. Nested model objects are converted via
    ``model_dump()`` when they provide one; plain dicts pass through unchanged;
    anything else is stringified as a last resort.
    """
    if not snapshot:
        return None

    def _dump(value):
        # dicts pass through, model objects are dumped, everything else -> str.
        if value is None:
            return None
        if isinstance(value, dict):
            return value
        if hasattr(value, "model_dump"):
            return value.model_dump()
        return str(value)

    # (attribute name, conversion mode) pairs, listed in output key order.
    spec = (
        ("id", "text"),
        ("week_start", "text"),
        ("training_state", "raw"),
        ("health_signal", "raw"),
        ("positioning_status", "raw"),
        ("positioning_change", "raw"),
        ("goal_trajectory", "raw"),
        ("goal_progress_pct", "raw"),
        ("next_run", "raw"),
        ("training_focus", "raw"),
        ("training_why", "raw"),
        ("key_insight", "raw"),
        ("forward_focus", "raw"),
        ("weekly_distance_km", "raw"),
        ("num_runs", "raw"),
        ("run_count", "raw"),
        ("consistency_score", "raw"),
        ("avg_pace", "raw"),
        ("avg_hr", "raw"),
        ("structure_status", "dump"),
        ("performance_brief", "raw"),
        ("performance_focus", "raw"),
        ("trend", "dump"),
        ("weekly_trend", "dump"),
        ("positioning", "dump"),
        ("positioning_view", "dump"),
        ("goal_trajectory_data", "dump"),
        ("insights", "dump"),
        ("plan", "raw"),
        ("recommendation", "dump"),
        ("charts", "charts"),
        ("weekly_brief", "raw"),
        ("weekly_focus", "raw"),
        ("active_goal", "dump"),
        ("goal_view", "dump"),
    )

    converters = {
        "raw": lambda v: v,
        "text": lambda v: str(v) if v else None,
        "dump": _dump,
        "charts": serialize_charts,
    }

    return {name: converters[mode](getattr(snapshot, name)) for name, mode in spec}
|
src/core/intelligence/runner_intelligence_snapshot.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass, field
|
| 2 |
+
from datetime import date
|
| 3 |
+
from typing import Optional, Dict
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
@dataclass
class RunnerIntelligenceSnapshot:
    """
    Central intelligence artifact representing the current state of a runner.

    This object serves as the canonical interface between the pipeline and:
    - UI (Gradio)
    - Integrations (Strava / WhatsApp)
    - Observability analytics

    It aggregates derived signals without altering pipeline logic.
    Fields are raw values rather than translated text, allowing i18n
    at the edge (e.g. UI).
    """

    # Unique identifier for this snapshot instance.
    id: Optional[uuid.UUID] = field(default_factory=uuid.uuid4)

    # Start date of the week this snapshot describes (None if unknown).
    week_start: Optional[date] = None

    # Training state and health (raw signal labels from upstream steps).
    training_state: Optional[str] = None
    health_signal: Optional[str] = None

    # Positioning: current status and change versus the prior period.
    positioning_status: Optional[str] = None
    positioning_change: Optional[str] = None

    # Goals: trajectory label and completion percentage.
    goal_trajectory: Optional[str] = None
    goal_progress_pct: Optional[float] = None

    # Advice / Future: next-session guidance.
    next_run: Optional[str] = None
    training_type: Optional[str] = None
    training_focus: Optional[str] = None
    training_why: Optional[str] = None

    # Insights: headline takeaway and forward-looking focus.
    key_insight: Optional[str] = None
    forward_focus: Optional[str] = None

    # Quantitative Metrics for the week.
    weekly_distance_km: float = 0.0
    num_runs: int = 0
    run_count: int = 0
    consistency_score: int = 0
    avg_pace: Optional[float] = None
    avg_hr: Optional[float] = None

    # Structure (weekly plan adherence data, shape defined by the producer).
    structure_status: Dict = field(default_factory=dict)

    # Performance narrative text.
    performance_brief: Optional[str] = None
    performance_focus: Optional[str] = None

    # UI Presentation Ready Fields — populated after construction by the
    # intelligence builder (see the _build_* helpers that assign onto the
    # freshly created snapshot).
    performance_story: Optional[str] = None
    delta_summary: Optional[Dict[str, str]] = None
    evidence_view: Optional[str] = None
    structure_view: Optional[str] = None
    goal_status_text: Optional[str] = None

    # Raw DTO data kept for UI transparency / debugging.
    trend: Dict = field(default_factory=dict)
    weekly_trend: Dict = field(default_factory=dict)
    positioning: Dict = field(default_factory=dict)
    positioning_view: Dict = field(default_factory=dict)
    goal_trajectory_data: Dict = field(default_factory=dict)
    insights: Dict = field(default_factory=dict)
    plan: Optional[str] = None
    recommendation: Dict = field(default_factory=dict)
    charts: Dict = field(default_factory=dict)
    weekly_brief: Optional[str] = None
    weekly_focus: Optional[str] = None
    weekly_snapshot: Dict = field(default_factory=dict)
    active_goal: Dict = field(default_factory=dict)
    goal_view: Dict = field(default_factory=dict)
|
src/core/pipeline/context.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass, field
|
| 2 |
+
from typing import List, Dict, Any, Optional
|
| 3 |
+
|
| 4 |
+
from domain.training.run import Run
|
| 5 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 6 |
+
from domain.training.weekly_trend import WeeklyTrend
|
| 7 |
+
from domain.runner.profile import RunnerProfile
|
| 8 |
+
from domain.training.training_recommendation import TrainingRecommendation
|
| 9 |
+
from domain.goals.goal_trajectory import GoalTrajectory
|
| 10 |
+
from ..intelligence.runner_intelligence_snapshot import RunnerIntelligenceSnapshot
|
| 11 |
+
|
| 12 |
+
from domain.runner_positioning import RunnerPositioning
|
| 13 |
+
from application.positioning_service import WeeklyPositioning
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@dataclass
class PipelineContext:
    """
    Explicit, typed data contract for the Runner pipeline.
    Eliminates dynamic field attachments and implicit state.
    """

    # Inputs
    runs: List[Any] = field(default_factory=list)          # full run history
    week_runs: List[Any] = field(default_factory=list)     # runs within the target week
    language: str = "en"                                   # UI language code for edge i18n
    target_monday: Optional[Any] = None  # date — start of the week being analyzed
    runner_profile: Optional[RunnerProfile] = None
    active_goal: Optional[Any] = None

    # Session state: per-week snapshots keyed by week identifier.
    sessions: Dict[Any, WeeklySnapshot] = field(default_factory=dict)

    # Computed training data
    weekly_snapshot: Optional[WeeklySnapshot] = None
    weekly_trend: Optional[WeeklyTrend] = None

    # Positioning layer

    runner_positioning: Optional[RunnerPositioning] = None
    positioning: Optional[WeeklyPositioning] = None
    positioning_view: Optional[Any] = None
    recommendation: Optional[TrainingRecommendation] = None
    goal_trajectory: Optional[GoalTrajectory] = None

    # Intelligence outputs
    insights: Optional[Dict[str, Any]] = None
    plan: Optional[str] = None
    summary: Optional[Dict[str, Any]] = None
    trends: Optional[Any] = None
    risk_assessment: Optional[Any] = None
    intelligence_snapshot: Optional[RunnerIntelligenceSnapshot] = None

    # Intelligence Cache (for reuse across runs of the pipeline)
    last_runner_positioning: Optional[Any] = None
    last_positioning: Optional[Any] = None
    last_insights: Dict[str, Any] = field(default_factory=dict)
    last_plan: Optional[str] = None
    last_brief: Optional[str] = None
    last_focus: Optional[str] = None

    # Visualization
    visualization_scope: str = "weekly"  # "full" | "weekly"
    charts: Dict[str, Any] = field(default_factory=dict)

    # Context control
    enable_intelligence: bool = True
    intelligence_cache: Dict[str, bool] = field(default_factory=dict)

    # Comparison
    period_comparison: Optional[Any] = None

    # Goal progress
    goal_progress: Optional[Dict[str, Any]] = None
    goal_view: Optional[Dict[str, Any]] = None
|
| 76 |
+
|
src/core/pipeline/pipeline.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
|
| 3 |
+
class RunnerPipeline:
    """Runs pipeline steps sequentially, fusing an IntelligenceStep that is
    immediately followed by a VisualizationStep into one parallel stage.

    In the fused stage, the intelligence step's independent agent coroutines
    (via ``run_parallel_agents``) and the visualization step run concurrently;
    the intelligence step's sequential remainder executes afterwards.
    """

    def __init__(self, steps):
        self.steps = steps

    async def execute(self, context):
        idx = 0
        total = len(self.steps)
        while idx < total:
            current = self.steps[idx]
            lookahead = self.steps[idx + 1] if idx + 1 < total else None

            # Detect the parallel block by class name to avoid importing
            # the concrete step types here.
            fuse = (
                current.__class__.__name__ == "IntelligenceStep"
                and lookahead is not None
                and lookahead.__class__.__name__ == "VisualizationStep"
            )
            if not fuse:
                await current.execute(context)
                idx += 1
                continue

            # 1. Gather independent agent tasks (Insights + Plan) plus the
            #    visualization task and run them concurrently.
            pending = []
            if hasattr(current, "run_parallel_agents"):
                pending.extend(current.run_parallel_agents(context))
            pending.append(lookahead.execute(context))
            if pending:
                await asyncio.gather(*pending)

            # 2. Run the IntelligenceStep's remaining sequential logic
            #    (brief + cache); already-completed agents are skipped inside
            #    IntelligenceStep.run.
            await current.execute(context)

            idx += 2
|
src/core/pipeline/step.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from observability import logger as obs_logger
|
| 2 |
+
from observability import components as obs_components
|
| 3 |
+
|
| 4 |
+
class PipelineStep:
    """Base class for pipeline steps.

    ``execute`` wraps the step body in an observability span named after the
    step; subclasses override ``run`` with the actual work.
    """

    # Span name used for tracing; subclasses should override.
    name = "pipeline_step"

    async def execute(self, context):
        # Every step is traced under the orchestrator component.
        with obs_logger.start_span(self.name, obs_components.ORCHESTRATOR):
            await self.run(context)

    async def run(self, context):
        # Subclasses must implement the step body.
        raise NotImplementedError
|
src/domain/goals/goal_trajectory.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from datetime import date
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@dataclass
class GoalTrajectory:
    """Deterministic snapshot of progress toward an active goal,
    produced by GoalTrajectoryEngine."""

    goal_name: str
    target_date: date
    status: str  # on_track | ahead | behind | unknown
    # Progress toward the goal; computed by GoalTrajectoryEngine from
    # current vs. expected weekly volume.
    progress_pct: float
    # Weekly volume the goal implies vs. what was actually run.
    expected_weekly_km: float
    current_weekly_km: float
    # Human-readable description of the next target to hit.
    next_milestone: str
|
src/domain/goals/goal_trajectory_engine.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from domain.goals.goal_trajectory import GoalTrajectory
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class GoalTrajectoryEngine:
    """Derives a GoalTrajectory (status / progress / next milestone) from the
    active goal and the current week's snapshot.

    Supports "volume" goals (weekly km target) and "race" goals (race distance
    from which an expected weekly volume is derived). Returns None for missing
    inputs or unsupported goal types.
    """

    @staticmethod
    def _classify(current_km, reference_km):
        """Band current volume against a reference: ≥110% -> ahead,
        ≥90% -> on_track, otherwise behind."""
        if current_km >= reference_km * 1.1:
            return "ahead"
        if current_km >= reference_km * 0.9:
            return "on_track"
        return "behind"

    def compute(
        self, goal, weekly_snapshot, weekly_trend=None, runner_profile=None
    ):
        """Compute the trajectory for ``goal`` given ``weekly_snapshot``.

        Returns a GoalTrajectory whose ``progress_pct`` is on a 0-100 scale
        for every goal type, or None when it cannot be computed.
        """
        if not goal or not weekly_snapshot:
            return None

        current_km = weekly_snapshot.total_distance_km or 0

        target_val = getattr(goal, "target_value", 0)
        unit = getattr(goal, "unit", "km")
        goal_type = getattr(goal, "type", "goal")
        name = f"{goal_type} {target_val} {unit}"

        # ---- Weekly Volume Goal ----
        if goal_type == "volume":
            target_km = goal.target_value
            progress_pct = (current_km / target_km) * 100 if target_km else 0

            return GoalTrajectory(
                goal_name=name,
                target_date=getattr(goal, "target_date", None),
                status=self._classify(current_km, target_km),
                progress_pct=progress_pct,
                expected_weekly_km=target_km,
                current_weekly_km=current_km,
                next_milestone=f"Maintain ≥{target_km:.1f} km/week",
            )

        # ---- Race Goal ----
        if goal_type == "race":
            target_km = goal.target_value

            # Prefer the runner's known baseline; fall back to this week's volume.
            if runner_profile and runner_profile.baseline_weekly_km:
                baseline = runner_profile.baseline_weekly_km
            else:
                baseline = current_km

            # Heuristic: race readiness implies weekly volume of at least
            # 1.5x the race distance (or the existing baseline, if higher).
            expected_weekly_km = max(baseline, target_km * 1.5)

            progress_pct = (
                (current_km / expected_weekly_km) * 100 if expected_weekly_km else 0
            )

            # BUG FIX: this branch previously returned progress_pct / 100.0
            # (a 0-1 fraction) while the volume branch returned a 0-100
            # percent into the same GoalTrajectory.progress_pct field.
            # Both branches now use the 0-100 scale the field name implies.
            return GoalTrajectory(
                goal_name=name,
                target_date=getattr(goal, "target_date", None),
                status=self._classify(current_km, expected_weekly_km),
                progress_pct=progress_pct,
                expected_weekly_km=expected_weekly_km,
                current_weekly_km=current_km,
                next_milestone=f"Reach ~{expected_weekly_km:.1f} km/week",
            )

        return None
|
src/domain/runner/goal.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from dataclasses import dataclass, field
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from typing import Optional, Literal
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclass
class Goal:
    """A runner's goal ("race", "volume", or "pace") persisted per runner."""

    id: uuid.UUID
    runner_id: uuid.UUID
    type: str  # "race", "volume", "pace"
    target_value: float
    unit: str  # "km", "min", "sec_per_km"
    target_date: Optional[datetime] = None
    status: Literal["active", "completed", "archived"] = "active"
    created_at: datetime = field(default_factory=datetime.now)
    achieved_at: Optional[datetime] = None

    def model_dump(self):
        """Serialize to a JSON-friendly dict: UUIDs become strings and
        datetimes become ISO-8601 strings (None stays None)."""

        def _iso(moment):
            # Optional datetimes serialize to ISO text or None.
            return moment.isoformat() if moment else None

        payload = {
            "id": str(self.id),
            "runner_id": str(self.runner_id),
            "type": self.type,
            "target_value": self.target_value,
            "unit": self.unit,
            "target_date": _iso(self.target_date),
            "status": self.status,
            "created_at": self.created_at.isoformat(),
            "achieved_at": _iso(self.achieved_at),
        }
        return payload
|
src/domain/runner/profile.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Literal
|
| 2 |
+
from pydantic import BaseModel, Field, field_validator
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class RunnerProfile(BaseModel):
    """Digital Twin model for persistent runner data.

    Holds the slowly-changing attributes of a runner (demographics,
    experience, baseline volume) used to contextualize weekly analysis.
    """

    runner_id: uuid.UUID
    runner_display_name: Optional[str] = None

    age: Optional[int] = None
    sex: Optional[Literal["male", "female", "other"]] = None

    experience_level: Optional[Literal["beginner", "intermediate", "advanced"]] = None

    baseline_weekly_km: Optional[float] = None
    gender: Optional[str] = Field(default=None)

    injury_history_notes: Optional[str] = None

    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now)

    @field_validator("gender")
    @classmethod
    def normalize_gender(cls, v: Optional[str]) -> Optional[str]:
        # Coerce any out-of-vocabulary value to None instead of raising,
        # so legacy / free-form inputs do not fail validation.
        accepted = {"male", "female", "other", "prefer_not_to_say"}
        return v if v in accepted else None
|
src/domain/runner_positioning.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Literal
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from datetime import date
|
| 4 |
+
from enum import Enum
|
| 5 |
+
|
| 6 |
+
class TrainingPhase(str, Enum):
    """Training periodization phase labels used by the positioning layer."""

    BASE = "base"
    BUILD = "build"
    PEAK = "peak"
    RECOVERY = "recovery"
    PLATEAU = "plateau"
|
| 12 |
+
|
| 13 |
+
class RunnerPositioning(BaseModel):
    """
    Domain model for representing a runner's positioning assessment.
    This is a deterministic interpretation of snapshots, trends, and goals.
    """

    week_start_date: date

    # Core signals
    health_signal: Literal["RECOVERING", "OPTIMAL", "OVERREACHING", "UNKNOWN"]
    position_status: Literal["AHEAD", "ON_TRACK", "FALLING_BEHIND", "UNKNOWN"]
    goal_trajectory: Literal["IMPROVING", "STABLE", "DECLINING", "UNKNOWN"]
    recommended_focus: Literal["RECOVERY", "CONSISTENCY", "INTENSITY", "MAINTENANCE"]
    training_phase: TrainingPhase

    # Metadata
    comparison_available: bool = False
    signal_strength: float = 1.0
    llm_used: bool = False
    summary: Optional[str] = None

    @classmethod
    def compute(
        cls,
        week_start: date,
        total_distance: float,
        target_distance: Optional[float],
        consistency_score: float,
        pace_delta: float,
        hr_delta: Optional[float],
        distance_delta_pct: float,
        comparison_available: bool,
        training_phase: TrainingPhase = TrainingPhase.BASE
    ) -> "RunnerPositioning":
        """
        Pure deterministic logic to compute positioning signals.
        No imports from outside the domain layer allowed.
        All thresholds are evaluated on the pre-computed deltas supplied
        by the positioning service.
        """

        def _health_signal():
            # A large volume jump or a clearly elevated HR reads as overreaching.
            if distance_delta_pct > 20.0 or (hr_delta is not None and hr_delta > 5.0):
                return "OVERREACHING"
            if consistency_score < 0.6:
                return "RECOVERING"
            if consistency_score > 0.8 and (hr_delta is None or hr_delta <= 0):
                return "OPTIMAL"
            return "UNKNOWN"

        def _position_status():
            # Weekly volume vs. goal distance, with a ±10% on-track band.
            if not target_distance or target_distance <= 0:
                return "UNKNOWN"
            diff_pct = (total_distance - target_distance) / target_distance
            if diff_pct > 0.1:
                return "AHEAD"
            if diff_pct < -0.1:
                return "FALLING_BEHIND"
            return "ON_TRACK"

        def _goal_trajectory():
            # A negative pace_delta means getting faster.
            if pace_delta < -5.0 and consistency_score > 0.7:
                return "IMPROVING"
            if abs(pace_delta) <= 5.0 and consistency_score > 0.6:
                return "STABLE"
            if pace_delta > 5.0 or consistency_score < 0.5:
                return "DECLINING"
            return "UNKNOWN"

        health = _health_signal()
        trajectory = _goal_trajectory()

        def _recommended_focus():
            # Recovery trumps everything; then consistency; intensity only
            # when optimal and not declining.
            if health == "OVERREACHING":
                return "RECOVERY"
            if consistency_score < 0.7:
                return "CONSISTENCY"
            if health == "OPTIMAL" and trajectory != "DECLINING":
                return "INTENSITY"
            return "MAINTENANCE"

        return cls(
            week_start_date=week_start,
            health_signal=health,
            position_status=_position_status(),
            goal_trajectory=trajectory,
            recommended_focus=_recommended_focus(),
            comparison_available=comparison_available,
            training_phase=training_phase
        )
|
src/domain/training/agent_models.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from datetime import datetime, date
|
| 3 |
+
from typing import Dict, Any, Optional, Union, List, Literal
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Insight(BaseModel):
    """A single insight generated by the agent."""

    # Human-readable insight text.
    message: str
    # Optional headline metric backing the message.
    value: Optional[Union[float, int, str]] = None
    # Arbitrary supporting data used to derive the insight.
    raw_data: Dict[str, Any] = Field(default_factory=dict)
    # Optional supporting evidence strings.
    evidence: Optional[List[str]] = None
    # Optional caveat / constraint attached to the insight.
    constraint: Optional[str] = None
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class InsightsOutput(BaseModel):
    """Output from the InsightsAgent."""

    # The single most actionable lever for improvement.
    primary_lever: Insight
    # The main risk indicator detected this period.
    risk_signal: Insight
    # Additional notable observations.
    key_observations: List[Insight]
    # Optional overall summary insight.
    summary: Optional[Insight] = None
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PlanOutput(BaseModel):
    """Output from the PlanAgent."""

    # The generated training plan as free-form text.
    plan: str
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class RiskAssessment(BaseModel):
    """Assessment of injury or fatigue risk based on heuristics."""

    risk_level: Literal["LOW", "MEDIUM", "HIGH"]
    # Human-readable reasons behind the assessed level.
    reasons: List[str] = Field(default_factory=list)
    # Suggested training adjustments to mitigate the risk.
    recommended_adjustments: List[str] = Field(default_factory=list)
    # Raw metric values the heuristics were evaluated on.
    metrics_used: Dict[str, Any] = Field(default_factory=dict)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class WeeklyTrends(BaseModel):
    """Weekly trend metrics used for analysis."""

    # Pace trend in seconds per km (negative = getting faster).
    pace_trend_s_per_km: Optional[float] = None
    # Distance trend in metres.
    distance_trend_m: Optional[float] = None
    avg_runs_per_week: Optional[float] = None
    # Run load monotony — exact definition set by the producer; TODO confirm.
    run_monotony: Optional[float] = None
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class WeeklySummary(BaseModel):
    """Aggregated metrics for the current coaching session."""

    week_start: Optional[date] = None
    # Distance per week keyed by week identifier (assumed — confirm with producers).
    weekly_km: Dict[str, float] = Field(default_factory=dict)
    # Defaults to a neutral midpoint when not computed.
    consistency_score: int = 50
    num_runs: int = 0
    total_distance_m: float = 0.0
    avg_hr_bpm: Optional[float] = None
    avg_pace_s_per_km: float = 0.0
    total_duration_s: int = 0
    # Cached narrative outputs and the provenance of the cached brief
    # (source hash + generation timestamp enable cache invalidation).
    performance_brief: Optional[str] = None
    performance_focus: Optional[str] = None
    structure_status: Optional[dict] = None
    brief_source_hash: Optional[str] = None
    brief_generated_at: Optional[datetime] = None
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class AnalysisRecord(BaseModel):
    """Full record of an analysis run."""

    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    created_at: datetime = Field(default_factory=datetime.now)
    # Input files and their detected formats.
    source_files: List[str] = Field(default_factory=list)
    formats: List[str] = Field(default_factory=list)
    # Aggregated metrics and per-sample time series for the analyzed run(s).
    run_summary: Dict[str, Any] = Field(default_factory=dict)
    run_timeseries: List[Dict[str, Any]] = Field(default_factory=list)
    # Serialized agent outputs.
    insights_json: Dict[str, Any] = Field(default_factory=dict)
    plan_json: Dict[str, Any] = Field(default_factory=dict)
    route_json: Dict[str, Any] = Field(default_factory=dict)
|
src/domain/training/charts.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from enum import Enum
|
| 2 |
+
from typing import Optional, Dict, Any
|
| 3 |
+
from pydantic import BaseModel, Field
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class ChartType(str, Enum):
    """Supported chart types."""

    PACE = "pace"
    HEART_RATE = "heart_rate"
    VOLUME = "volume"
    FREQUENCY = "frequency"
    ZONES = "zones"
    # Free-form chart — semantics defined by the renderer via params.
    DYNAMIC = "dynamic"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ChartSpec(BaseModel):
    """A specification for a chart to be rendered."""

    chart_type: ChartType
    # Optional display title.
    title: Optional[str] = None
    # Chart-specific rendering parameters (keys defined by the renderer).
    params: Dict[str, Any] = Field(default_factory=dict)
|
src/domain/training/period_comparison.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
@dataclass
class PeriodComparison:
    """Deltas between two labelled training periods (A vs B)."""

    period_a_label: str
    period_b_label: str
    # Relative distance change in percent.
    distance_delta_pct: float
    # Pace change in seconds per km (negative = faster).
    avg_pace_delta_s_per_km: float
    # Change in number of runs.
    frequency_delta: int
    avg_hr_delta: float
    consistency_delta: float
|
src/domain/training/planned_session.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Literal, Optional
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
from datetime import datetime, date
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class PlannedSession(BaseModel):
    """
    Represents a planned running session for a specific week.
    Goal-informed and editable.
    """

    id: uuid.UUID = Field(default_factory=uuid.uuid4)
    runner_id: uuid.UUID
    # Start of the planning week this session belongs to.
    week_start_date: date
    # Either a regular weekday run or the week's long run.
    session_type: Literal["weekday", "long_run"]
    planned_date: date
    target_distance_km: float
    # Set once an actual run is matched to this planned session.
    completed_run_id: Optional[str] = None
    created_at: datetime = Field(default_factory=datetime.now)

    class Config:
        frozen = False  # Allow mutation for completed_run_id
|
src/domain/training/run.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Dict, Any, List
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Run(BaseModel):
    """Domain model for a single run."""

    id: Optional[str] = None
    start_time: Optional[datetime] = None
    # Totals in metres and seconds.
    total_distance_m: float = 0.0
    total_duration_s: float = 0.0
    # Pre-formatted pace string rather than a number — TODO confirm format with producers.
    avg_pace_min_per_km: Optional[str] = None
    avg_hr_bpm: Optional[float] = None
    max_hr_bpm: Optional[float] = None
    elevation_gain_m: float = 0.0
    # Path of the source file this run was parsed from (presumably FIT/GPX — verify).
    source_path: Optional[str] = None

    # Raw records if needed for further processing.
    # NOTE: a mutable default is per-instance-copied on a pydantic model,
    # unlike a plain class attribute, so this is not shared state.
    records: List[Dict[str, Any]] = []

    class Config:
        extra = "ignore"  # silently drop unknown input fields
|
src/domain/training/training_recommendation.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
|
| 4 |
+
class TrainingRecommendation(BaseModel):
    """
    Domain model for training recommendations.
    Deterministic suggestion based on positioning and performance trends.
    """

    # High-level focus area; vocabulary set by the producing service.
    focus: str
    # Optional concrete session type for the next run.
    session_type: Optional[str] = None
    # Human-readable explanation of the recommendation.
    description: str
    # Producer's confidence in the suggestion (0-1 assumed — TODO confirm).
    confidence: float
|
src/domain/training/trend_snapshot.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
from pydantic import BaseModel, Field
import uuid
from datetime import datetime, UTC, date

class TrendSnapshot(BaseModel):
    """
    Represents calculated deltas between weekly snapshots.
    Used for longitudinal trend analysis.
    """

    id: uuid.UUID = Field(default_factory=uuid.uuid4)
    runner_id: uuid.UUID
    week_start_date: date
    comparison_type: str = "week_over_week"
    # The week this snapshot is compared against; None means no baseline.
    reference_week_start_date: Optional[date] = None

    # Computed deltas vs. the reference week; all None when no reference
    # week is available.
    distance_delta_km: Optional[float] = None
    avg_pace_delta_s_per_km: Optional[float] = None
    avg_hr_delta: Optional[float] = None
    runs_count_delta: Optional[int] = None
    consistency_delta: Optional[float] = None

    # Version tag of the engine that produced the deltas (required).
    engine_version: str
    computed_at: datetime = Field(default_factory=lambda: datetime.now(UTC))

    @property
    def comparison_available(self) -> bool:
        """Returns True if a reference week is available for comparison."""
        return self.reference_week_start_date is not None

    @classmethod
    def empty(cls, runner_id: uuid.UUID, week_start_date: date) -> "TrendSnapshot":
        """Returns a null trend snapshot with all numeric fields set to None."""
        # The field defaults already produce the null state (deltas None,
        # no reference week, comparison_type "week_over_week", computed_at
        # stamped now); only the required fields and the sentinel engine
        # version need to be supplied.
        return cls(
            runner_id=runner_id,
            week_start_date=week_start_date,
            engine_version="null",
        )
|
src/domain/training/weekly_snapshot.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
from pydantic import BaseModel, Field
from datetime import datetime, date
import uuid


class WeeklySnapshot(BaseModel):
    """Aggregated weekly snapshot for long-term tracking."""

    id: uuid.UUID = Field(default_factory=uuid.uuid4)
    runner_id: uuid.UUID
    week_start_date: date
    total_distance_km: float
    avg_pace_sec_per_km: float
    avg_hr: Optional[float] = None
    max_hr: Optional[float] = None
    total_time_sec: int
    run_count: int
    consistency_score: float
    # Optional generated weekly summary plus its provenance metadata
    # (when it was generated and a hash of its source data).
    performance_brief: Optional[str] = None
    performance_focus: Optional[str] = None
    brief_generated_at: Optional[datetime] = None
    brief_source_hash: Optional[str] = None
    structure_status: Optional[dict] = None
    charts: Optional[dict] = None
    # NOTE(review): naive local timestamp, whereas TrendSnapshot.computed_at
    # is timezone-aware UTC — confirm whether this asymmetry is intentional.
    created_at: datetime = Field(default_factory=datetime.now)

    @classmethod
    def from_metrics(
        cls,
        runner_id: uuid.UUID,
        week_start_date: date,
        total_distance_km: float,
        avg_pace_sec_per_km: float,
        total_time_sec: int,
        run_count: int,
        consistency_score: float,
        avg_hr: Optional[float] = None,
        max_hr: Optional[float] = None,
        performance_brief: Optional[str] = None,
        brief_generated_at: Optional[datetime] = None,
        brief_source_hash: Optional[str] = None,
        structure_status: Optional[dict] = None,
        charts: Optional[dict] = None,
        performance_focus: Optional[str] = None,
    ) -> "WeeklySnapshot":
        """Construct a snapshot from aggregated weekly metrics.

        All brief/chart metadata defaults to None so callers that only have
        raw metrics can build a bare snapshot. ``performance_focus`` was
        previously not settable through this constructor even though the
        model declares the field; it is accepted here as a trailing keyword
        argument so existing call sites remain valid.
        """
        return cls(
            runner_id=runner_id,
            week_start_date=week_start_date,
            total_distance_km=total_distance_km,
            avg_pace_sec_per_km=avg_pace_sec_per_km,
            total_time_sec=total_time_sec,
            run_count=run_count,
            consistency_score=consistency_score,
            avg_hr=avg_hr,
            max_hr=max_hr,
            performance_brief=performance_brief,
            performance_focus=performance_focus,
            brief_generated_at=brief_generated_at,
            brief_source_hash=brief_source_hash,
            structure_status=structure_status,
            charts=charts,
        )
|
src/domain/training/weekly_snapshot_builder.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional
|
| 2 |
+
from datetime import date
|
| 3 |
+
import uuid
|
| 4 |
+
from domain.training.run import Run
|
| 5 |
+
from domain.training.weekly_snapshot import WeeklySnapshot
|
| 6 |
+
|
| 7 |
+
class WeeklySnapshotBuilder:
|
| 8 |
+
"""
|
| 9 |
+
Pure domain component to build WeeklySnapshot objects from raw runs.
|
| 10 |
+
Deterministic, no side effects, no repository dependencies.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
def build(
|
| 14 |
+
self,
|
| 15 |
+
runner_id: uuid.UUID,
|
| 16 |
+
week_start_date: date,
|
| 17 |
+
runs: List[Run]
|
| 18 |
+
) -> WeeklySnapshot:
|
| 19 |
+
"""
|
| 20 |
+
Aggregates a list of runs into a weekly snapshot for a specific week.
|
| 21 |
+
"""
|
| 22 |
+
if not runs:
|
| 23 |
+
return WeeklySnapshot.from_metrics(
|
| 24 |
+
runner_id=runner_id,
|
| 25 |
+
week_start_date=week_start_date,
|
| 26 |
+
total_distance_km=0.0,
|
| 27 |
+
avg_pace_sec_per_km=0.0,
|
| 28 |
+
total_time_sec=0,
|
| 29 |
+
run_count=0,
|
| 30 |
+
consistency_score=0.0,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
total_distance_m = sum(run.total_distance_m for run in runs)
|
| 34 |
+
total_duration_s = sum(run.total_duration_s for run in runs)
|
| 35 |
+
total_distance_km = total_distance_m / 1000.0
|
| 36 |
+
|
| 37 |
+
avg_pace_sec_per_km = 0.0
|
| 38 |
+
if total_distance_km > 0:
|
| 39 |
+
avg_pace_sec_per_km = total_duration_s / total_distance_km
|
| 40 |
+
|
| 41 |
+
hr_values = [run.avg_hr_bpm for run in runs if run.avg_hr_bpm is not None]
|
| 42 |
+
avg_hr = sum(hr_values) / len(hr_values) if hr_values else None
|
| 43 |
+
|
| 44 |
+
max_hr_values = [run.max_hr_bpm for run in runs if run.max_hr_bpm is not None]
|
| 45 |
+
max_hr = max(max_hr_values) if max_hr_values else None
|
| 46 |
+
|
| 47 |
+
# Simple consistency metric: 5 runs = 100% (min 1, max 5)
|
| 48 |
+
consistency_score = float(min(len(runs) * 20, 100))
|
| 49 |
+
|
| 50 |
+
return WeeklySnapshot.from_metrics(
|
| 51 |
+
runner_id=runner_id,
|
| 52 |
+
week_start_date=week_start_date,
|
| 53 |
+
total_distance_km=total_distance_km,
|
| 54 |
+
avg_pace_sec_per_km=avg_pace_sec_per_km,
|
| 55 |
+
total_time_sec=int(total_duration_s),
|
| 56 |
+
run_count=len(runs),
|
| 57 |
+
consistency_score=consistency_score,
|
| 58 |
+
avg_hr=avg_hr,
|
| 59 |
+
max_hr=max_hr,
|
| 60 |
+
)
|