ViniciusKhan commited on
Commit
ae4762b
·
1 Parent(s): 65154d2

Add application file

Browse files
Files changed (4) hide show
  1. .gitignore +146 -0
  2. Dockerfile +31 -0
  3. main.py +1031 -0
  4. requirements.txt +10 -0
.gitignore ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ============================================================
2
+ # LifeOps / FastAPI + Frontend (GitHub + Hugging Face Spaces)
3
+ # ============================================================
4
+
5
+ # ----------------------------
6
+ # Segredos / configs locais
7
+ # ----------------------------
8
+ .env
9
+ .env.*
10
+ !.env.example
11
+ *.pem
12
+ *.key
13
+ secrets*
14
+ **/secrets*
15
+ **/*secret*
16
+ *.secret
17
+
18
+ # ----------------------------
19
+ # Python
20
+ # ----------------------------
21
+ __pycache__/
22
+ *.py[cod]
23
+ *$py.class
24
+ *.pyo
25
+ *.pyd
26
+ *.so
27
+ *.egg-info/
28
+ .eggs/
29
+ build/
30
+ dist/
31
+ pip-wheel-metadata/
32
+ .pytest_cache/
33
+ .coverage
34
+ .coverage.*
35
+ htmlcov/
36
+ .mypy_cache/
37
+ .ruff_cache/
38
+ .pytype/
39
+ .pyre/
40
+ .tox/
41
+ .nox/
42
+ .cache/
43
+ .venv/
44
+ venv/
45
+ ENV/
46
+ env/
47
+ .conda/
48
+ poetry.lock
49
+ Pipfile.lock
50
+
51
+ # Jupyter
52
+ .ipynb_checkpoints/
53
+
54
+ # IDEs / editores
55
+ .vscode/
56
+ .idea/
57
+ *.iml
58
+
59
+ # ----------------------------
60
+ # Node / Frontend
61
+ # ----------------------------
62
+ node_modules/
63
+ npm-debug.log*
64
+ yarn-debug.log*
65
+ yarn-error.log*
66
+ pnpm-debug.log*
67
+ *.tsbuildinfo
68
+ .next/
69
+ out/
70
+ dist/
71
+ build/
72
+ .cache/
73
+
74
+ # ----------------------------
75
+ # Logs
76
+ # ----------------------------
77
+ *.log
78
+ logs/
79
+ **/logs/
80
+
81
+ # ----------------------------
82
+ # Sistema operacional
83
+ # ----------------------------
84
+ .DS_Store
85
+ Thumbs.db
86
+ desktop.ini
87
+
88
+ # ----------------------------
89
+ # Banco local / dados (CRÍTICO)
90
+ # ----------------------------
91
+ # Se você usa SQLite local, NÃO suba pro Git
92
+ *.db
93
+ *.sqlite
94
+ *.sqlite3
95
+ *.db-shm
96
+ *.db-wal
97
+
98
+ # Pastas típicas de dados locais
99
+ data/
100
+ backend/data/
101
+ **/data/
102
+
103
+ # Backups/exportações
104
+ *.bak
105
+ *.backup
106
+ *.dump
107
+ *.sql.gz
108
+ *.zip
109
+
110
+ # ----------------------------
111
+ # Hugging Face Spaces / Deploy
112
+ # ----------------------------
113
+ # Evita lixo e arquivos de runtime
114
+ __HF_TMP__/
115
+ tmp/
116
+ temp/
117
+ *.tmp
118
+
119
+ # Se usar Gradio/Spaces com cache local
120
+ .gradio/
121
+ hf_cache/
122
+ .hf_cache/
123
+ **/.hf_cache/
124
+
125
+ # ----------------------------
126
+ # Arquivos grandes / mídia (opcional)
127
+ # ----------------------------
128
+ # Descomente se você NÃO quiser versionar relatórios/PDFs e mídias
129
+ # *.pdf
130
+ # *.mp4
131
+ # *.mov
132
+ # *.avi
133
+ # *.mkv
134
+
135
+ # ----------------------------
136
+ # Permitir arquivos importantes de exemplo
137
+ # ----------------------------
138
+ !README.md
139
+ !requirements.txt
140
+ !pyproject.toml
141
+ !Dockerfile
142
+ !docker-compose.yml
143
+ !app.py
144
+ !main.py
145
+ !backend/main.py
146
+ !backend/app.py
Dockerfile ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dockerfile — Hugging Face Spaces (Docker)
# Goal: run FastAPI on 0.0.0.0:7860 (the HF Spaces default port)

FROM python:3.11-slim

# Don't write .pyc files; flush stdout/stderr immediately (better container logs).
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

WORKDIR /app

# System dependencies (minimal)
# (Kept lean; add gcc/build-essential if a library needs compilation)
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

# Install requirements first so this layer is cached across code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Ensure the local data directory exists (SQLite / embedded replica)
RUN mkdir -p /app/data

# Default Spaces (Docker) port
EXPOSE 7860

# HF sets $PORT; fall back to 7860. bash -lc is needed for the ${PORT:-7860} expansion.
CMD ["bash", "-lc", "uvicorn main:app --host 0.0.0.0 --port ${PORT:-7860}"]
main.py ADDED
@@ -0,0 +1,1031 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LifeOps API — FastAPI + Turso (libSQL) via Embedded Replica + Gemini Coach (Snix)
3
+
4
+ - Persistência local: SQLite (DB_FILE)
5
+ - Sync opcional: Turso (TURSO_DATABASE_URL + TURSO_AUTH_TOKEN)
6
+ - Endpoints: /state, /logs, /settings, /health, /llm/models
7
+ - Coach IA (Snix): POST /coach/snix (Gemini via .env), com cache + retry + fallback
8
+
9
+ Requisitos (no requirements.txt):
10
+ - fastapi
11
+ - uvicorn[standard]
12
+ - python-dotenv
13
+ - pydantic
14
+ - libsql (ou libsql-client compatível com `libsql.connect`)
15
+ """
16
+
17
+ import os
18
+ import json
19
+ import threading
20
+ import time
21
+ import random
22
+ from typing import Any, Dict, Optional, List, Tuple
23
+ from datetime import datetime, date, timedelta
24
+
25
+ from dotenv import load_dotenv
26
+ from fastapi import FastAPI, HTTPException
27
+ from fastapi.middleware.cors import CORSMiddleware
28
+ from pydantic import BaseModel, Field, ConfigDict
29
+
30
+ from urllib import request as urlrequest
31
+ from urllib.error import URLError, HTTPError
32
+
33
+ # ============================================================
34
+ # libsql import (mantém compatibilidade com seu código)
35
+ # ============================================================
36
+ try:
37
+ import libsql # precisa fornecer libsql.connect(...)
38
+ except Exception as e:
39
+ raise RuntimeError(
40
+ "Falha ao importar 'libsql'. Garanta no requirements.txt um pacote que exponha "
41
+ "'libsql.connect(DB_FILE, sync_url=..., auth_token=...)'. "
42
+ f"Erro: {e}"
43
+ )
44
+
45
+ # ============================================================
46
+ # Carrega .env
47
+ # ============================================================
48
+ load_dotenv()
49
+
50
+ # ============================================================
51
+ # Config DB/Turso
52
+ # ============================================================
53
+ DB_FILE = os.getenv("DB_FILE", "./data/lifeops.db")
54
+ TURSO_URL = os.getenv("TURSO_DATABASE_URL")
55
+ TURSO_TOKEN = os.getenv("TURSO_AUTH_TOKEN")
56
+
57
+ DEFAULT_GOALS = {
58
+ "sleepMin": 7.0,
59
+ "workoutsPerWeek": 3,
60
+ "foodTarget": 4,
61
+ "anxietyMax": 6,
62
+ }
63
+
64
+ # ============================================================
65
+ # Config Gemini
66
+ # ============================================================
67
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "").strip()
68
+ GEMINI_MODEL = os.getenv("GEMINI_MODEL", "gemini-2.5-flash").strip()
69
+ GEMINI_BASE_URL = os.getenv("GEMINI_BASE_URL", "https://generativelanguage.googleapis.com/v1beta").strip()
70
+
71
+ # Resiliência LLM
72
+ SNIX_CACHE_TTL_SEC = int(os.getenv("SNIX_CACHE_TTL_SEC", "900")) # 15 min
73
+ SNIX_RETRIES = int(os.getenv("SNIX_RETRIES", "3")) # tentativas em 429/5xx
74
+ SNIX_BACKOFF_BASE = float(os.getenv("SNIX_BACKOFF_BASE", "0.8")) # base do backoff
75
+ SNIX_BACKOFF_CAP = float(os.getenv("SNIX_BACKOFF_CAP", "8.0")) # teto do backoff
76
+ SNIX_MAX_OUTPUT_TOKENS = int(os.getenv("SNIX_MAX_OUTPUT_TOKENS", "800"))
77
+
78
+ # ============================================================
79
+ # App
80
+ # ============================================================
81
+ app = FastAPI(title="LifeOps API", version="1.2.3")
82
+
83
+ app.add_middleware(
84
+ CORSMiddleware,
85
+ allow_origins=["*"], # DEV ok. Produção: restrinja.
86
+ allow_methods=["*"],
87
+ allow_headers=["*"],
88
+ )
89
+
90
+ _conn: Optional["libsql.Connection"] = None
91
+ _lock = threading.Lock()
92
+
93
+ # Cache simples em memória (por processo)
94
+ _snix_cache_lock = threading.Lock()
95
+ _snix_cache: Dict[str, Dict[str, Any]] = {} # key -> {"ts": float, "value": dict}
96
+
97
+
98
+ # ============================================================
99
+ # Helpers DB
100
+ # ============================================================
101
def _ensure_db_dir() -> None:
    """Create the parent directory of DB_FILE when the path has one."""
    parent = os.path.dirname(DB_FILE)
    if not parent:
        return
    os.makedirs(parent, exist_ok=True)
105
+
106
+
107
def _connect() -> "libsql.Connection":
    """Open the libSQL connection.

    With Turso credentials present this is an embedded replica (local file
    synced against the remote); otherwise it is a plain local database.
    """
    _ensure_db_dir()
    if not (TURSO_URL and TURSO_TOKEN):
        # Local-only mode.
        return libsql.connect(DB_FILE)
    # Embedded replica with remote sync.
    return libsql.connect(DB_FILE, sync_url=TURSO_URL, auth_token=TURSO_TOKEN)
114
+
115
+
116
def _sync(conn: "libsql.Connection") -> None:
    """Best-effort Turso sync; a no-op without credentials and never raises."""
    if not (TURSO_URL and TURSO_TOKEN):
        return
    try:
        conn.sync()
    except Exception:
        # Sync is an enhancement — failures must not take the app down.
        pass
123
+
124
+
125
def _init_schema(conn: "libsql.Connection") -> None:
    """Create the `logs` and `state` tables if missing and seed the singleton state row."""
    # Daily log entries, one row per calendar day (date is the primary key).
    conn.execute("""
        CREATE TABLE IF NOT EXISTS logs (
            date TEXT PRIMARY KEY,
            sleep REAL NOT NULL,
            sleepQual INTEGER NOT NULL,
            trained INTEGER NOT NULL,
            trainMin INTEGER NOT NULL,
            trainType TEXT,
            foodScore INTEGER NOT NULL,
            water INTEGER NOT NULL,
            meals INTEGER NOT NULL,
            mood INTEGER NOT NULL,
            anxiety INTEGER NOT NULL,
            notes TEXT
        );
    """)

    # Single-row app state (goals as JSON + UI theme); CHECK pins id to 1.
    conn.execute("""
        CREATE TABLE IF NOT EXISTS state (
            id INTEGER PRIMARY KEY CHECK(id = 1),
            goals_json TEXT NOT NULL,
            theme TEXT NOT NULL
        );
    """)

    # Seed defaults; INSERT OR IGNORE leaves an existing row untouched.
    conn.execute(
        "INSERT OR IGNORE INTO state (id, goals_json, theme) VALUES (1, ?, ?);",
        (json.dumps(DEFAULT_GOALS, ensure_ascii=False), "dark"),
    )
    conn.commit()
156
+
157
+
158
def _require_conn() -> "libsql.Connection":
    """Return the shared DB connection, lazily creating and initializing it.

    Fix: the original checked and assigned the global `_conn` outside `_lock`,
    so two request threads racing through the `is None` check could each open
    a connection and run schema init concurrently. The check-and-create now
    happens entirely under the lock; the global is only published once the
    schema init and initial sync have succeeded.
    """
    global _conn
    with _lock:
        if _conn is None:
            conn = _connect()
            _init_schema(conn)
            _sync(conn)
            _conn = conn
    return _conn
166
+
167
+
168
+ def _bool_to_int(v: bool) -> int:
169
+ return 1 if v else 0
170
+
171
+
172
+ def _row_to_log(r) -> Dict[str, Any]:
173
+ return {
174
+ "date": r[0],
175
+ "sleep": float(r[1]),
176
+ "sleepQual": int(r[2]),
177
+ "trained": bool(r[3]),
178
+ "trainMin": int(r[4]),
179
+ "trainType": r[5] or "",
180
+ "foodScore": int(r[6]),
181
+ "water": bool(r[7]),
182
+ "meals": bool(r[8]),
183
+ "mood": int(r[9]),
184
+ "anxiety": int(r[10]),
185
+ "notes": r[11] or "",
186
+ }
187
+
188
+
189
def _merge_goals(goals: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Overlay user goals on DEFAULT_GOALS, coercing each value to its expected type.

    Any value that cannot be coerced falls back to the corresponding default.
    """
    merged = dict(DEFAULT_GOALS)
    merged.update(goals or {})

    def _coerce(cast, value, fallback):
        try:
            return cast(value)
        except Exception:
            return fallback

    return {
        "sleepMin": _coerce(float, merged.get("sleepMin"), DEFAULT_GOALS["sleepMin"]),
        "workoutsPerWeek": _coerce(int, merged.get("workoutsPerWeek"), DEFAULT_GOALS["workoutsPerWeek"]),
        "foodTarget": _coerce(int, merged.get("foodTarget"), DEFAULT_GOALS["foodTarget"]),
        "anxietyMax": _coerce(int, merged.get("anxietyMax"), DEFAULT_GOALS["anxietyMax"]),
    }
210
+
211
+
212
def _parse_yyyy_mm_dd(s: str) -> date:
    """Parse a 'YYYY-MM-DD' string into a date; raises ValueError on bad input."""
    return datetime.strptime(s, "%Y-%m-%d").date()
214
+
215
+
216
def _today_safe() -> date:
    """Return today's local date (single seam for window selection / tests)."""
    # NOTE(review): uses the server's local timezone — presumably acceptable
    # for a single-user app; confirm if users span timezones.
    return date.today()
218
+
219
+
220
+ # ============================================================
221
+ # Models
222
+ # ============================================================
223
class LogIn(BaseModel):
    """Payload for upserting one daily log entry (keyed by `date`)."""
    model_config = ConfigDict(extra="ignore")
    # Day being logged, formatted YYYY-MM-DD.
    date: str
    # Hours slept.
    sleep: float
    # Subjective sleep quality (1–5, per the Snix prompt's key names).
    sleepQual: int
    trained: bool
    # Minutes trained; 0 when not trained.
    trainMin: int = 0
    trainType: Optional[str] = None
    # Diet score (1–5, per the Snix prompt's key names).
    foodScore: int
    water: bool
    meals: bool
    # Mood / anxiety on a 0–10 scale (per the Snix prompt's key names).
    mood: int
    anxiety: int
    notes: str = ""
237
+
238
+
239
class SettingsIn(BaseModel):
    """Payload for updating user settings (goals + UI theme)."""
    model_config = ConfigDict(extra="ignore")
    # Partial goals dict; merged over DEFAULT_GOALS by _merge_goals.
    goals: Dict[str, Any] = Field(default_factory=dict)
    theme: str = "dark"
243
+
244
+
245
class SnixCoachIn(BaseModel):
    """Request body for the Snix coach endpoint."""
    model_config = ConfigDict(extra="ignore")
    # Length of the analysis window in days.
    days: int = 14
    # Presumably caps how many log entries feed the prompt — usage not visible here.
    max_items: int = 60
    focus: str = "ansiedade"
    # When False, free-text notes are stripped from the prompt.
    include_notes: bool = True
251
+
252
+
253
class SnixCoachOut(BaseModel):
    """Response shape for the Snix coach endpoint."""
    ok: bool
    # Coach identifier ("snix").
    coach: str
    # Model name that produced the report (or fallback marker).
    model: str
    days: int
    n_logs_used: int
    # Markdown report body.
    report: str
    # Aggregates from _summarize_window that backed the report.
    stats: Dict[str, Any]
261
+
262
+
263
+ # ============================================================
264
+ # Analytics
265
+ # ============================================================
266
+ def _pearson_corr(xs: List[float], ys: List[float]) -> Optional[float]:
267
+ if len(xs) != len(ys) or len(xs) < 4:
268
+ return None
269
+ n = len(xs)
270
+ mx = sum(xs) / n
271
+ my = sum(ys) / n
272
+ num = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
273
+ denx = sum((x - mx) ** 2 for x in xs) ** 0.5
274
+ deny = sum((y - my) ** 2 for y in ys) ** 0.5
275
+ if denx == 0 or deny == 0:
276
+ return None
277
+ return num / (denx * deny)
278
+
279
+
280
def _summarize_window(goals: Dict[str, Any], logs: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Aggregate a window of daily logs into the stats dict consumed by Snix.

    `logs` is expected in ascending date order — the trend block compares the
    last 3 entries against the 3 before them.
    """
    n = len(logs)
    anx_limit = int(goals.get("anxietyMax", 6))

    # Column extraction; `or 0` guards against None values stored in logs.
    sleep = [float(l.get("sleep", 0) or 0) for l in logs]
    mood = [int(l.get("mood", 0) or 0) for l in logs]
    anx = [int(l.get("anxiety", 0) or 0) for l in logs]
    food = [int(l.get("foodScore", 0) or 0) for l in logs]
    trained = [bool(l.get("trained", False)) for l in logs]

    workouts = sum(1 for t in trained if t)
    high_anx_days = sum(1 for a in anx if a > anx_limit)

    # Peak anxiety and the (first) date it occurred on.
    peak_anx = max(anx) if anx else 0
    peak_idx = anx.index(peak_anx) if anx else 0
    peak_date = logs[peak_idx].get("date") if logs else None

    # Heuristic training effect: mean anxiety on rest days minus training days
    # (positive => training days show lower anxiety).
    anx_train = [a for a, t in zip(anx, trained) if t]
    anx_not = [a for a, t in zip(anx, trained) if not t]
    train_effect = None
    if anx_train and anx_not:
        train_effect = round((sum(anx_not) / len(anx_not)) - (sum(anx_train) / len(anx_train)), 3)

    # Correlation is a signal, not causality; None when <4 points or constant series.
    corr_sleep_anx = _pearson_corr(sleep, [float(a) for a in anx])
    if corr_sleep_anx is not None:
        corr_sleep_anx = round(corr_sleep_anx, 3)

    # Count missing days between the min and max logged dates (gap measure).
    dts = []
    for l in logs:
        try:
            dts.append(_parse_yyyy_mm_dd(l["date"]))
        except Exception:
            pass

    start = min(dts) if dts else None
    end = max(dts) if dts else None
    missing = 0
    if start and end:
        have = set(dts)
        cur = start
        while cur <= end:
            if cur not in have:
                missing += 1
            cur += timedelta(days=1)

    # Short-term trend: mean of the last 3 entries vs the previous 3.
    trend = {}
    if n >= 6:
        last3 = logs[-3:]
        prev3 = logs[-6:-3]

        def mean(arr, key) -> float:
            return sum(float(x.get(key, 0) or 0) for x in arr) / len(arr)

        trend = {
            "anxiety_delta": round(mean(last3, "anxiety") - mean(prev3, "anxiety"), 2),
            "sleep_delta": round(mean(last3, "sleep") - mean(prev3, "sleep"), 2),
            "mood_delta": round(mean(last3, "mood") - mean(prev3, "mood"), 2),
        }

    return {
        "n": n,
        "window_start": start.isoformat() if start else None,
        "window_end": end.isoformat() if end else None,
        "missing_days_in_range": missing,
        "anxiety_limit": anx_limit,
        "avg_sleep": round(sum(sleep) / n, 2) if n else 0,
        "avg_mood": round(sum(mood) / n, 2) if n else 0,
        "avg_anxiety": round(sum(anx) / n, 2) if n else 0,
        "avg_food": round(sum(food) / n, 2) if n else 0,
        "workouts": workouts,
        "high_anxiety_days": high_anx_days,
        "peak_anxiety": int(peak_anx),
        "peak_date": peak_date,
        "train_effect": train_effect,
        "corr_sleep_vs_anxiety": corr_sleep_anx,
        "trend": trend,
    }
358
+
359
+
360
def _select_window_from_logs(logs_desc: List[Dict[str, Any]], days: int) -> Dict[str, Any]:
    """Select a window of `days` ending at the last "plausible" log entry.

    Strategy:
    - Prefer logs dated up to today (avoids accidentally pulling "future" rows).
    - With fewer than 3 past-or-today entries, fall back to all logs.

    Fix: the original crashed on edge input — IndexError on an empty
    `logs_desc` (``base_sorted[-1]``) and ValueError when no entry had a
    parseable date. Both cases now return an empty window instead.

    Returns a dict with: window (logs in the range, ascending by date),
    future_count, used_start_date / used_end_date (ISO strings or None),
    and used_past_only.
    """
    today = _today_safe()
    past_or_today: List[Dict[str, Any]] = []
    future: List[Dict[str, Any]] = []

    for l in logs_desc:
        try:
            d = _parse_yyyy_mm_dd(l["date"])
        except Exception:
            continue  # unparseable dates are classified nowhere
        (past_or_today if d <= today else future).append(l)

    base = past_or_today if len(past_or_today) >= 3 else logs_desc
    base_sorted = sorted(base, key=lambda x: x["date"])

    # Find the latest entry with a parseable date; bail out gracefully if none.
    end_date = None
    for l in reversed(base_sorted):
        try:
            end_date = _parse_yyyy_mm_dd(l["date"])
            break
        except Exception:
            continue
    if end_date is None:
        return {
            "window": [],
            "future_count": len(future),
            "used_start_date": None,
            "used_end_date": None,
            "used_past_only": len(past_or_today) >= 3,
        }

    start_date = end_date - timedelta(days=days - 1)

    window = []
    for l in base_sorted:
        try:
            d = _parse_yyyy_mm_dd(l["date"])
        except Exception:
            continue
        if start_date <= d <= end_date:
            window.append(l)

    return {
        "window": window,
        "future_count": len(future),
        "used_start_date": start_date.isoformat(),
        "used_end_date": end_date.isoformat(),
        "used_past_only": len(past_or_today) >= 3,
    }
399
+
400
+
401
+ # ============================================================
402
+ # Cache helpers (Snix)
403
+ # ============================================================
404
def _cache_get(key: str) -> Optional[Dict[str, Any]]:
    """Return the cached value for `key`, or None when absent or expired.

    Expired entries are evicted on read.
    """
    now = time.time()
    with _snix_cache_lock:
        entry = _snix_cache.get(key)
        if entry is None:
            return None
        age = now - float(entry["ts"])
        if age > SNIX_CACHE_TTL_SEC:
            _snix_cache.pop(key, None)
            return None
        return entry["value"]
414
+
415
+
416
def _cache_set(key: str, value: Dict[str, Any]) -> None:
    """Store `value` under `key`, stamped with the current time for TTL checks."""
    entry = {"ts": time.time(), "value": value}
    with _snix_cache_lock:
        _snix_cache[key] = entry
419
+
420
+
421
+ # ============================================================
422
+ # Gemini client (com retry em 429/5xx)
423
+ # ============================================================
424
def _validate_gemini_model_name(model: str) -> str:
    """Normalize and sanity-check a Gemini model name.

    Strips an optional 'models/' prefix and rejects names that clearly belong
    to other providers or do not start with 'gemini-'.

    Raises:
        HTTPException: 503 when empty; 422 when the name looks wrong.
    """
    name = (model or "").strip()
    if not name:
        raise HTTPException(status_code=503, detail="GEMINI_MODEL vazio. Ex.: gemini-2.5-flash.")

    if name.startswith("models/"):
        name = name[len("models/"):].strip()

    lowered = name.lower()

    # Obvious non-Gemini model families.
    if "llama" in lowered or "mixtral" in lowered:
        raise HTTPException(
            status_code=422,
            detail=f"GEMINI_MODEL inválido para Gemini: '{name}'. Use um modelo Gemini (ex.: gemini-2.5-flash).",
        )

    if not lowered.startswith("gemini-"):
        raise HTTPException(
            status_code=422,
            detail=f"GEMINI_MODEL suspeito: '{name}'. Use um modelo que comece com 'gemini-'.",
        )

    return name
446
+
447
+
448
def _gemini_list_models() -> Dict[str, Any]:
    """Call Gemini's ListModels REST endpoint and return the parsed JSON.

    Raises:
        HTTPException: 503 when no API key is configured; 502 on any
        HTTP/network/parse failure (detail carries a truncated body).
    """
    if not GEMINI_API_KEY:
        raise HTTPException(status_code=503, detail="GEMINI_API_KEY não configurada no .env.")

    base = (GEMINI_BASE_URL or "").strip().rstrip("/")
    # The API key travels in the query string, per the v1beta REST API.
    url = f"{base}/models?key={GEMINI_API_KEY}"

    req = urlrequest.Request(url, headers={"Accept": "application/json"}, method="GET")

    try:
        with urlrequest.urlopen(req, timeout=25) as resp:
            raw = resp.read().decode("utf-8", errors="replace")
            return json.loads(raw)
    except HTTPError as e:
        body = ""
        try:
            body = e.read().decode("utf-8", errors="replace")
        except Exception:
            pass
        raise HTTPException(status_code=502, detail=f"Gemini ListModels HTTPError: {e.code} {body[:400]}")
    except URLError as e:
        raise HTTPException(status_code=502, detail=f"Gemini ListModels URLError: {str(e)[:200]}")
    except Exception as e:
        raise HTTPException(status_code=502, detail=f"Gemini ListModels erro inesperado: {str(e)[:200]}")
472
+
473
+
474
def _gemini_generate_once(
    system_text: str,
    user_text: str,
    model: str,
    temperature: float,
    max_output_tokens: int,
    top_p: float,
) -> Dict[str, Any]:
    """Single (no-retry) generateContent call against the Gemini REST API.

    Returns {"text": joined candidate text, "meta": {block/finish reasons,
    usage}, "raw_head": first 300 chars of the raw response}. HTTP errors
    propagate as urllib exceptions for the retry wrapper to classify.

    Raises:
        HTTPException: 503 without an API key; 422/503 from model-name validation.
    """
    if not GEMINI_API_KEY:
        raise HTTPException(status_code=503, detail="GEMINI_API_KEY não configurada no .env.")

    model = _validate_gemini_model_name(model)

    base = (GEMINI_BASE_URL or "").strip().rstrip("/")
    url = f"{base}/models/{model}:generateContent?key={GEMINI_API_KEY}"

    payload = {
        "systemInstruction": {"parts": [{"text": system_text}]},
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "generationConfig": {
            "temperature": float(temperature),
            "maxOutputTokens": int(max_output_tokens),
            "topP": float(top_p),
        },
    }

    data = json.dumps(payload).encode("utf-8")
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "User-Agent": "LifeOps/1.2 (FastAPI; SnixCoach)",
        "Connection": "close",
    }

    req = urlrequest.Request(url, data=data, headers=headers, method="POST")

    with urlrequest.urlopen(req, timeout=40) as resp:
        raw = resp.read().decode("utf-8", errors="replace")
    j = json.loads(raw)

    # Safety-filter verdicts, if any.
    prompt_fb = j.get("promptFeedback") or {}
    block_reason = prompt_fb.get("blockReason")

    candidates = j.get("candidates") or []
    text = ""
    finish_reason = None

    if candidates:
        # Only the first candidate is used.
        c0 = candidates[0] or {}
        finish_reason = c0.get("finishReason")
        content = c0.get("content") or {}
        parts = content.get("parts") or []
        texts: List[str] = []
        for p in parts:
            if isinstance(p, dict) and p.get("text"):
                texts.append(str(p["text"]).strip())
        text = "\n".join(t for t in texts if t).strip()

    meta = {
        "block_reason": block_reason,
        "finish_reason": finish_reason,
        "usage": j.get("usageMetadata"),
    }

    return {"text": text, "meta": meta, "raw_head": raw[:300]}
539
+
540
+
541
def _gemini_generate(
    system_text: str,
    user_text: str,
    model: str,
    temperature: float = 0.35,
    max_output_tokens: int = 800,
    top_p: float = 0.95,
) -> Dict[str, Any]:
    """generateContent with exponential-backoff retries.

    Retries on:
    - 429 (quota/rate limit)
    - 500/503 (instability)

    Fix: HTTPException raised inside the attempt for configuration problems
    (503 missing API key, 422/503 bad model name) was previously swallowed by
    the generic `except Exception` handler, retried pointlessly, and finally
    re-surfaced as a misleading 502. It is now re-raised immediately with its
    original status code.

    Raises:
        HTTPException: 502 once retries are exhausted or on a non-retriable
        HTTP error; config errors propagate unchanged.
    """
    last_err: Optional[str] = None

    for attempt in range(SNIX_RETRIES + 1):
        try:
            return _gemini_generate_once(
                system_text=system_text,
                user_text=user_text,
                model=model,
                temperature=temperature,
                max_output_tokens=max_output_tokens,
                top_p=top_p,
            )
        except HTTPException:
            # Config/validation errors are not retriable — surface as-is.
            raise
        except HTTPError as e:
            body = ""
            try:
                body = e.read().decode("utf-8", errors="replace")
            except Exception:
                pass

            last_err = f"Gemini HTTPError: {e.code} {body[:300]}"

            retriable = e.code in (429, 500, 503)
            if (not retriable) or (attempt >= SNIX_RETRIES):
                raise HTTPException(status_code=502, detail=last_err)

        except URLError as e:
            last_err = f"Gemini URLError: {str(e)[:200]}"
            if attempt >= SNIX_RETRIES:
                raise HTTPException(status_code=502, detail=last_err)

        except Exception as e:
            last_err = f"Gemini erro inesperado: {str(e)[:200]}"
            if attempt >= SNIX_RETRIES:
                raise HTTPException(status_code=502, detail=last_err)

        # Exponential backoff with jitter before the next attempt.
        sleep_s = min(SNIX_BACKOFF_CAP, SNIX_BACKOFF_BASE * (2 ** attempt))
        sleep_s += random.uniform(0, 0.25)
        time.sleep(sleep_s)

    raise HTTPException(status_code=502, detail=last_err or "Falha desconhecida no Gemini.")
595
+
596
+
597
+ # ============================================================
598
+ # Fallback (sem LLM) — relatório determinístico
599
+ # ============================================================
600
+ def _snix_fallback_report(stats: Dict[str, Any], focus: str) -> str:
601
+ """
602
+ Relatório 100% offline e útil.
603
+ Observação: sem promessas, sem diagnóstico, só hábito + métrica.
604
+ """
605
+ focus = (focus or "ansiedade").strip()
606
+
607
+ lines = []
608
+ lines.append(f"# Snix (modo offline) — foco: {focus}")
609
+ lines.append("")
610
+ lines.append("## Leitura objetiva")
611
+ lines.append(f"- Janela: {stats.get('window_start_selected') or stats.get('window_start')} → {stats.get('window_end_selected') or stats.get('window_end')} ({stats.get('n')} registros)")
612
+ lines.append(f"- Sono médio: {stats.get('avg_sleep')}h (meta: {stats.get('sleepMin','?')}h)")
613
+ lines.append(f"- Humor médio: {stats.get('avg_mood')}/10")
614
+ lines.append(f"- Ansiedade média: {stats.get('avg_anxiety')}/10 (limite: {stats.get('anxiety_limit')})")
615
+ lines.append(f"- Dias acima do limite: {stats.get('high_anxiety_days')}")
616
+ lines.append(f"- Pico de ansiedade: {stats.get('peak_anxiety')}/10 em {stats.get('peak_date')}")
617
+ lines.append(f"- Treinos na janela: {stats.get('workouts')}")
618
+ if stats.get("corr_sleep_vs_anxiety") is not None:
619
+ lines.append(f"- Correlação sono×ansiedade: {stats.get('corr_sleep_vs_anxiety')} (sinal, não causalidade)")
620
+ if stats.get("train_effect") is not None:
621
+ lines.append(f"- Efeito treino (heurístico): {stats.get('train_effect')} (positivo sugere treino associado a menor ansiedade)")
622
+
623
+ lines.append("")
624
+ lines.append("## Plano mínimo (7 dias)")
625
+ lines.append("- 1) Sono: manter horário fixo de dormir/acordar (±30 min).")
626
+ lines.append("- 2) Movimento: 10–20 min em dias alternados (caminhada/força leve).")
627
+ lines.append("- 3) Registro: preencher todos os dias (reduz viés e melhora análise).")
628
+
629
+ lines.append("")
630
+ lines.append("## Protocolo rápido (1–5 min)")
631
+ lines.append("- Respiração 4-6 (inspirar 4s, expirar 6s) por 2 min.")
632
+ lines.append("- Anotar 3 preocupações + 1 próxima ação possível (2 min).")
633
+ lines.append("- Alongamento leve de pescoço/ombros (1–2 min).")
634
+
635
+ lines.append("")
636
+ lines.append("## 3 métricas para amanhã")
637
+ lines.append("- Horário de dormir e acordar (objetivo: consistência).")
638
+ lines.append("- Ansiedade (0–10) antes de dormir.")
639
+ lines.append("- Movimento (sim/não + minutos).")
640
+
641
+ lines.append("")
642
+ lines.append("> Sem quota do Gemini, eu viro estatístico. Quando a cota volta, eu viro coach de novo.")
643
+ return "\n".join(lines)
644
+
645
+
646
+ # ============================================================
647
+ # Prompt do Snix
648
+ # ============================================================
649
def _build_snix_prompt(
    goals: Dict[str, Any],
    window: List[Dict[str, Any]],
    focus: str,
    include_notes: bool,
) -> Tuple[str, str, Dict[str, Any]]:
    """Build the (system_text, user_json, stats) triple for the Snix LLM call.

    Logs are compacted (truncated strings, units encoded in key names) to keep
    the prompt small; `stats` comes from _summarize_window on the same window.
    """
    # Compact per-day records for the prompt payload.
    compact = []
    for l in window:
        compact.append({
            "date": l["date"],
            "sleep_h": float(l["sleep"]),
            "sleep_qual_1to5": int(l["sleepQual"]),
            "trained": bool(l["trained"]),
            "train_min": int(l.get("trainMin", 0)),
            "train_type": (l.get("trainType") or "")[:20],
            "food_1to5": int(l["foodScore"]),
            "water_ok": bool(l.get("water", False)),
            "meals_ok": bool(l.get("meals", False)),
            "mood_0to10": int(l["mood"]),
            "anxiety_0to10": int(l["anxiety"]),
            # Notes are capped at 200 chars and dropped entirely when disabled.
            "notes": ((l.get("notes") or "")[:200] if include_notes else ""),
        })

    stats = _summarize_window(goals, window)

    # System instruction (PT-BR, intentionally kept verbatim — runtime text).
    system_text = (
        "Você é o Snix, coach de hábitos guiado por dados do LifeOps.\n"
        "Missão: reduzir ansiedade e estabilizar humor com intervenções pequenas, realistas e mensuráveis.\n"
        "Regras:\n"
        "- Não faça diagnóstico médico/psicológico.\n"
        "- Não use linguagem alarmista.\n"
        "- Se notar ansiedade alta e persistente, sugira conversar com um adulto de confiança e, se possível, um profissional.\n"
        "- Use linguagem direta, objetiva e prática em PT-BR.\n"
        "- Baseie recomendações em stats/padrões e proponha experimentos simples.\n"
        "- Inclua no máximo 1 linha curta de humor sagaz, sem banalizar o tema.\n"
    )

    # User payload: goals + aggregates + raw compact logs + task list.
    user_payload = {
        "focus": (focus or "ansiedade")[:40],
        "goals": goals,
        "stats": stats,
        "logs": compact,
        "tarefas": [
            "1) Leitura objetiva dos dados (sem floreio).",
            "2) Padrões e relações prováveis (sono vs ansiedade; treino vs ansiedade).",
            "3) Hipóteses testáveis (máx. 4): 'se eu fizer X, espero Y'.",
            "4) Plano de 7 dias (10–20 min/dia).",
            "5) Protocolo anti-ansiedade (2–4 técnicas; 1–5 min).",
            "6) 3 métricas para amanhã (simples).",
            "7) Se houver gaps, como corrigir o registro.",
        ],
        "restricoes": ["Sem misticismo.", "Sem promessas absolutas.", "Nada perigoso."],
        "formato": "Markdown com títulos curtos e listas.",
    }

    return system_text, json.dumps(user_payload, ensure_ascii=False), stats
705
+
706
+
707
+ # ============================================================
708
+ # Lifecycle
709
+ # ============================================================
710
@app.on_event("startup")
def on_startup() -> None:
    """Open the DB connection, create the schema, and run an initial Turso sync."""
    # NOTE(review): @app.on_event is deprecated in recent FastAPI in favor of
    # lifespan handlers; behavior kept as-is.
    global _conn
    _conn = _connect()
    with _lock:
        _init_schema(_conn)
        _sync(_conn)
717
+
718
+
719
@app.on_event("shutdown")
def on_shutdown() -> None:
    """Close the global DB connection, always clearing the module-level handle."""
    global _conn
    conn = _conn
    if conn is None:
        return
    try:
        conn.close()
    finally:
        # Clear the handle even if close() raises.
        _conn = None
727
+
728
+
729
+ # ============================================================
730
+ # Endpoints
731
+ # ============================================================
732
@app.get("/")
def root():
    """Service banner with pointers to the docs and health endpoints."""
    banner = dict(
        ok=True,
        service="LifeOps API",
        version=app.version,
        docs="/docs",
        health="/health",
    )
    return banner
741
+
742
+
743
@app.get("/health")
def health():
    """Liveness/config probe: DB mode, Turso status, and Snix/Gemini settings."""
    return {
        "ok": True,
        "db_file": DB_FILE,
        "turso_enabled": bool(TURSO_URL and TURSO_TOKEN),
        # NOTE(review): exposes the Turso URL (not the token) — confirm this
        # endpoint is not public if the URL is considered sensitive.
        "turso_url": TURSO_URL if TURSO_URL else None,
        "snix_enabled": bool(GEMINI_API_KEY),
        "snix_provider": "gemini",
        "gemini_model": GEMINI_MODEL,
        "gemini_base": GEMINI_BASE_URL,
        "snix_cache_ttl_sec": SNIX_CACHE_TTL_SEC,
        "snix_retries": SNIX_RETRIES,
    }
757
+
758
+
759
@app.get("/llm/models")
def llm_models():
    """List the Gemini models available to the configured API key."""
    available = _gemini_list_models()
    return available
762
+
763
+
764
@app.get("/state")
def get_state():
    """Return the full client state: every log (newest first), goals, theme.

    Raises:
        HTTPException 500: the singleton state row (id=1) is missing.
    """
    conn = _require_conn()

    # All reads happen under the shared lock: the SQLite connection is a
    # single module-level object shared across request threads.
    with _lock:
        row = conn.execute("SELECT goals_json, theme FROM state WHERE id=1;").fetchone()
        if not row:
            raise HTTPException(status_code=500, detail="State não inicializado (id=1 ausente).")

        # Corrupt/empty goals JSON degrades to defaults instead of failing.
        try:
            goals_raw = json.loads(row[0] or "{}")
        except Exception:
            goals_raw = {}

        goals = _merge_goals(goals_raw)
        # Any unrecognized theme value falls back to "dark".
        theme = row[1] if row[1] in ("dark", "light") else "dark"

        logs_rows = conn.execute("""
            SELECT date, sleep, sleepQual, trained, trainMin, trainType, foodScore, water, meals, mood, anxiety, notes
            FROM logs
            ORDER BY date DESC;
        """).fetchall()

    logs: List[Dict[str, Any]] = [_row_to_log(r) for r in logs_rows]
    return {"logs": logs, "goals": goals, "theme": theme}
789
+
790
+
791
@app.post("/logs")
def upsert_log(payload: LogIn):
    """Create or update (upsert, keyed by date) a single daily log entry.

    Validates the date and all numeric ranges, writes under the shared
    lock, then pushes the change to the remote replica via `_sync`.

    Returns:
        {"ok": True} on success.

    Raises:
        HTTPException 422: malformed date or out-of-range field values.
    """
    from datetime import date  # local import: avoids touching module-level deps

    conn = _require_conn()

    # Strict ISO-8601 validation. The previous length/dash check accepted
    # garbage such as "2024-9x-99"; fromisoformat rejects it. The explicit
    # length guard keeps the compact "YYYYMMDD" form (accepted by
    # fromisoformat on Python 3.11+) rejected as before.
    try:
        if len(payload.date) != 10:
            raise ValueError
        date.fromisoformat(payload.date)
    except ValueError:
        raise HTTPException(status_code=422, detail="date deve estar no formato YYYY-MM-DD.")

    sleep = float(payload.sleep)
    sleep_qual = int(payload.sleepQual)
    food = int(payload.foodScore)
    mood = int(payload.mood)
    anx = int(payload.anxiety)
    train_min = int(payload.trainMin or 0)

    # (value, min, max, message): checked in the original order so the
    # first failing field still produces the same 422 detail.
    checks = (
        (sleep, 0, 24, "sleep deve estar entre 0 e 24."),
        (sleep_qual, 1, 5, "sleepQual deve estar entre 1 e 5."),
        (food, 1, 5, "foodScore deve estar entre 1 e 5."),
        (mood, 0, 10, "mood deve estar entre 0 e 10."),
        (anx, 0, 10, "anxiety deve estar entre 0 e 10."),
        (train_min, 0, 600, "trainMin fora do intervalo esperado (0–600)."),
    )
    for value, lo, hi, msg in checks:
        if value < lo or value > hi:
            raise HTTPException(status_code=422, detail=msg)

    with _lock:
        conn.execute("""
            INSERT INTO logs (date, sleep, sleepQual, trained, trainMin, trainType, foodScore, water, meals, mood, anxiety, notes)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(date) DO UPDATE SET
                sleep=excluded.sleep,
                sleepQual=excluded.sleepQual,
                trained=excluded.trained,
                trainMin=excluded.trainMin,
                trainType=excluded.trainType,
                foodScore=excluded.foodScore,
                water=excluded.water,
                meals=excluded.meals,
                mood=excluded.mood,
                anxiety=excluded.anxiety,
                notes=excluded.notes;
        """, (
            payload.date,
            sleep,
            sleep_qual,
            _bool_to_int(payload.trained),
            train_min,
            (payload.trainType or ""),
            food,
            _bool_to_int(payload.water),
            _bool_to_int(payload.meals),
            mood,
            anx,
            payload.notes or "",
        ))
        conn.commit()
        _sync(conn)

    return {"ok": True}
852
+
853
+
854
@app.delete("/logs/{date_str}")
def delete_log(date_str: str):
    """Delete the log for a given ISO date (idempotent: ok even if absent).

    Raises:
        HTTPException 422: date is not a valid YYYY-MM-DD value.
    """
    from datetime import date  # local import: avoids touching module-level deps

    conn = _require_conn()

    # Strict ISO-8601 validation — the previous length/dash check accepted
    # invalid calendar dates. Length guard keeps 3.11+'s "YYYYMMDD" rejected.
    try:
        if len(date_str) != 10:
            raise ValueError
        date.fromisoformat(date_str)
    except ValueError:
        raise HTTPException(status_code=422, detail="date deve estar no formato YYYY-MM-DD.")

    with _lock:
        conn.execute("DELETE FROM logs WHERE date=?;", (date_str,))
        conn.commit()
        _sync(conn)

    return {"ok": True}
867
+
868
+
869
@app.put("/settings")
def save_settings(payload: SettingsIn):
    """Persist the user's goals (merged with defaults) and UI theme."""
    conn = _require_conn()

    goals = _merge_goals(payload.goals or {})
    # Anything other than an explicit valid theme falls back to "dark".
    theme = payload.theme if payload.theme in ("dark", "light") else "dark"
    serialized = json.dumps(goals, ensure_ascii=False)

    with _lock:
        conn.execute(
            "UPDATE state SET goals_json=?, theme=? WHERE id=1;",
            (serialized, theme),
        )
        conn.commit()
        _sync(conn)

    return {"ok": True, "goals": goals, "theme": theme}
885
+
886
+
887
+ # ============================================================
888
+ # Snix Coach
889
+ # ============================================================
890
@app.post("/coach/snix", response_model=SnixCoachOut)
def snix_coach(payload: SnixCoachIn):
    """Generate the "Snix" coaching report from recent logs via Gemini.

    Clamps the requested window, loads goals and the most recent logs,
    builds the prompt, and calls the LLM. Responses are cached per
    window/focus/notes combination; when the LLM quota is exhausted, a
    deterministic offline fallback report is returned with HTTP 200.

    Raises:
        HTTPException 422: no logs, or fewer than 3 days in the window.
        HTTPException 500: the singleton state row (id=1) is missing.
    """
    conn = _require_conn()

    # Clamp client-supplied parameters to safe bounds.
    days = max(3, min(60, int(payload.days)))
    max_items = max(10, min(240, int(payload.max_items)))
    focus = (payload.focus or "ansiedade").strip()[:40]
    include_notes = bool(payload.include_notes)

    # Database reads happen under the shared lock; the LLM call below
    # runs outside it.
    with _lock:
        row = conn.execute("SELECT goals_json, theme FROM state WHERE id=1;").fetchone()
        if not row:
            raise HTTPException(status_code=500, detail="State não inicializado (id=1 ausente).")

        # Corrupt/empty goals JSON degrades to defaults instead of failing.
        try:
            goals_raw = json.loads(row[0] or "{}")
        except Exception:
            goals_raw = {}

        goals = _merge_goals(goals_raw)

        rows = conn.execute("""
            SELECT date, sleep, sleepQual, trained, trainMin, trainType, foodScore, water, meals, mood, anxiety, notes
            FROM logs
            ORDER BY date DESC
            LIMIT ?;
        """, (max_items,)).fetchall()

    logs_desc = [_row_to_log(r) for r in rows]
    if not logs_desc:
        raise HTTPException(status_code=422, detail="Sem logs suficientes para análise do Snix.")

    sel = _select_window_from_logs(logs_desc=logs_desc, days=days)
    window = sel["window"]
    if len(window) < 3:
        raise HTTPException(status_code=422, detail="Poucos dados na janela (mínimo 3 dias).")

    system_text, user_text, stats = _build_snix_prompt(goals, window, focus, include_notes)

    # Cache key: same window, same focus, same notes flag => reuse report.
    cache_key = f"days={days}|focus={focus}|notes={int(include_notes)}|end={sel['used_end_date']}|n={len(window)}"
    cached = _cache_get(cache_key)
    if cached:
        return SnixCoachOut(**cached)

    # Try the LLM; if quota is exhausted, return the fallback (200 OK).
    try:
        out = _gemini_generate(
            system_text=system_text,
            user_text=user_text,
            model=GEMINI_MODEL,
            temperature=0.35,
            max_output_tokens=SNIX_MAX_OUTPUT_TOKENS,
            top_p=0.95,
        )

        report = (out.get("text") or "").strip()
        meta = out.get("meta") or {}

        # Safety filter tripped: replace report with actionable guidance.
        if meta.get("block_reason"):
            report = (
                "Sem resposta do Snix: a API bloqueou o conteúdo desta solicitação.\n"
                "Tente foco diferente (ex.: 'sono', 'rotina') ou desative include_notes."
            )
        # Empty completion: suggest how to get a usable answer.
        if not report:
            report = (
                "Sem resposta do Snix (texto vazio).\n"
                "Aumente a janela (ex.: 21 dias) ou reduza notas (include_notes=false)."
            )

        # Append a technical note when future-dated entries were detected.
        if sel["future_count"] > 0:
            report += (
                "\n\nNota técnica: detectei registros em datas futuras. "
                "A inferência prioriza dados até a data atual; o futuro é melhor como planejamento."
            )

        stats_out = {
            **stats,
            "sleepMin": goals.get("sleepMin"),
            "window_start_selected": sel["used_start_date"],
            "window_end_selected": sel["used_end_date"],
            "used_past_only": sel["used_past_only"],
            "future_count": sel["future_count"],
            "llm_meta": meta,
            "cache_key": cache_key,
        }

        result = SnixCoachOut(
            ok=True,
            coach="Snix",
            model=_validate_gemini_model_name(GEMINI_MODEL),
            days=days,
            n_logs_used=len(window),
            report=report,
            stats=stats_out,
        ).model_dump()

        _cache_set(cache_key, result)
        return SnixCoachOut(**result)

    except HTTPException as e:
        # Quota exhaustion usually surfaces as a wrapped 429 error.
        detail = str(e.detail or "")
        is_quota = (" 429 " in detail) or ("RESOURCE_EXHAUSTED" in detail) or ("exceeded your current quota" in detail)

        if is_quota:
            stats_out = {
                **stats,
                "sleepMin": goals.get("sleepMin"),
                "window_start_selected": sel["used_start_date"],
                "window_end_selected": sel["used_end_date"],
                "used_past_only": sel["used_past_only"],
                "future_count": sel["future_count"],
                "llm_meta": {"error": "quota_exhausted"},
                "cache_key": cache_key,
            }

            # Deterministic offline report so the client still gets value.
            report = _snix_fallback_report(stats_out, focus)
            result = SnixCoachOut(
                ok=True,
                coach="Snix",
                model="offline-fallback",
                days=days,
                n_logs_used=len(window),
                report=report,
                stats=stats_out,
            ).model_dump()

            _cache_set(cache_key, result)
            return SnixCoachOut(**result)

        # Any other error: propagate unchanged.
        raise
1023
+
1024
+
1025
+ # ============================================================
1026
+ # Exec local opcional (não atrapalha Docker/HF)
1027
+ # ============================================================
1028
if __name__ == "__main__":
    # Optional local execution; does not interfere with Docker/HF Spaces.
    import uvicorn

    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=int(os.getenv("PORT", "8000")),
        reload=False,
    )
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ python-dotenv
4
+ requests
5
+ sqlalchemy
6
+ libsql-client
7
+ pydantic
8
+ numpy
9
+ scikit-learn
10
+ google-generativeai