Spaces:
Sleeping
Sleeping
| import os | |
| from pathlib import Path | |
| from typing import Optional, Tuple, List, Dict | |
| from functools import lru_cache | |
| import gradio as gr | |
| import pandas as pd | |
| import numpy as np | |
| import plotly.express as px | |
| import joblib | |
| # ZeroGPU + models | |
| import spaces | |
| import torch | |
| from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline | |
| from huggingface_hub import InferenceClient | |
# ------------------------
# Config & storage
# ------------------------
# Local directory for tracker CSVs and the downloadable plan file.
DATA_DIR = Path("data")
DATA_DIR.mkdir(exist_ok=True)
# Timestamp display format used by the fasting tracker.
TS_FMT = "%Y-%m-%d %H:%M:%S"
# Pre-trained regressor artifact; loaded eagerly at import time, so the Space
# fails fast if the file is missing.
DT_PATH = "./decision_tree_regressor.joblib"
decision_tree_regressor = joblib.load(DT_PATH)
# Local lightweight model (fallback) — tiny seq2seq model kept resident so the
# app still produces text without an HF token or GPU (device=-1 pins to CPU).
GEN_MODEL = "google/flan-t5-small"
_tokenizer = AutoTokenizer.from_pretrained(GEN_MODEL)
_model = AutoModelForSeq2SeqLM.from_pretrained(GEN_MODEL)
_generate_cpu = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer, device=-1)
# HF Inference / Inference Providers models offered in the UI dropdown.
SOTA_MODELS = [
    "Qwen/Qwen2.5-72B-Instruct",
    "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "Qwen/Qwen2.5-32B-Instruct",
    "Qwen/Qwen2.5-7B-Instruct",
    "jesusvilela/manifoldgl",  # <-- added
]
# ------------------------
# HF token handling (Space Secrets)
# ------------------------
def get_hf_api_key() -> Optional[str]:
    """Return a Hugging Face token from the environment (Space Secrets).

    Variables are checked in priority order:
      1) HF_API_KEY (requested)
      2) HF_TOKEN
      3) common Hub token env vars
    The first non-empty value wins.
    """
    token = None
    for env_var in (
        "HF_API_KEY",
        "HF_TOKEN",
        "HUGGINGFACEHUB_API_TOKEN",
        "HUGGING_FACE_HUB_TOKEN",
    ):
        token = os.getenv(env_var)
        if token:
            break
    return token
@lru_cache(maxsize=None)
def _hf_client(model_id: str) -> InferenceClient:
    """Cached client per model (one `InferenceClient` instance per model_id).

    The docstring always promised caching but no cache was applied — and
    `lru_cache` was imported at module top yet unused — so it is applied here.
    Exceptions (e.g. missing token) are not cached by lru_cache, so a failed
    lookup is retried on the next call.
    NOTE(review): the cache keys on model_id only; if the token env var
    changes after first use, the previously built client is kept.

    Compatible with huggingface_hub versions where the auth kwarg may be
    `api_key` (newer) or `token` (older).

    Raises:
        RuntimeError: if no HF token is present in the environment.
    """
    api_key = get_hf_api_key()
    if not api_key:
        raise RuntimeError(
            "Missing HF_API_KEY. Add a Space secret named HF_API_KEY (or HF_TOKEN) to enable HF inference."
        )
    try:
        # Newer huggingface_hub
        return InferenceClient(model=model_id, api_key=api_key, timeout=120)
    except TypeError:
        # Older huggingface_hub
        return InferenceClient(model=model_id, token=api_key, timeout=120)
def generate_with_hf_inference(prompt: str, model_id: str, max_new_tokens: int = 900) -> str:
    """Generate text via Hugging Face Inference (and/or Inference Providers).

    Works on CPU-only Spaces and with ZeroGPU; requires a token in the Space
    secrets (HF_API_KEY recommended). On any failure the tiny local model is
    used instead, with the error message prefixed for visibility.
    """
    try:
        client = _hf_client(model_id)
        completion = client.text_generation(
            prompt,
            max_new_tokens=max_new_tokens,
            temperature=0.6,
            top_p=0.9,
            repetition_penalty=1.05,
            stop=["</s>"],
            return_full_text=False,
        )
        return (completion or "").strip()
    except Exception as e:
        # Fall back to local tiny model inside a GPU window if available
        fallback_text = generate_on_gpu(prompt, max_new_tokens=min(max_new_tokens, 600))
        return f"(HF Inference error: {e})\n" + fallback_text
# ------------------------
# ZeroGPU functions (presence at import satisfies ZeroGPU)
# ------------------------
def generate_on_gpu(prompt: str, max_new_tokens: int = 600) -> str:
    """Generate with the tiny local model.

    If CUDA is visible (inside a ZeroGPU window) the pipeline is rebound to
    the GPU; otherwise the resident CPU pipeline is used. Any failure on the
    GPU path is downgraded to the CPU pipeline with a note appended.
    """
    try:
        if not torch.cuda.is_available():
            result = _generate_cpu(prompt, max_new_tokens=max_new_tokens)
        else:
            gpu_pipe = pipeline(
                "text2text-generation",
                model=_model.to("cuda"),
                tokenizer=_tokenizer,
                device=0,
            )
            result = gpu_pipe(prompt, max_new_tokens=max_new_tokens)
        return result[0]["generated_text"].strip()
    except Exception as e:
        result = _generate_cpu(prompt, max_new_tokens=max_new_tokens)
        return result[0]["generated_text"].strip() + f"\n\n(Note: GPU path failed: {e})"
# ------------------------
# Metrics & helpers
# ------------------------
# Activity multipliers applied on top of BMR to estimate TDEE.
ACTIVITY = {
    "Sedentary": 1.2,
    "Lightly active": 1.375,
    "Moderately active": 1.55,
    "Very active": 1.725,
    "Athlete": 1.9,
}
# Relative calorie adjustment per goal (e.g. -15% of TDEE for fat loss).
GOAL_CAL_ADJ = {"Fat loss": -0.15, "Recomp/Maintenance": 0.0, "Muscle gain": 0.10}
def bmi(w, h):
    """Body-mass index from weight (kg) and height (cm)."""
    height_m = h / 100
    return w / height_m ** 2
def bmr_mifflin(sex, w, h, a):
    """Mifflin-St Jeor basal metabolic rate (kcal/day)."""
    sex_term = 5 if sex == "Male" else -161
    return 10 * w + 6.25 * h - 5 * a + sex_term
def tdee(bmr, act):
    """Total daily energy expenditure; unknown activity labels default to 1.2."""
    multiplier = ACTIVITY.get(act, 1.2)
    return bmr * multiplier
def parse_hhmm(hhmm: str) -> Tuple[int, int]:
    """Parse a 24h 'HH:MM' string into an (hour, minute) tuple.

    Surrounding whitespace is tolerated. Previously a non-string or a string
    without exactly one ':' raised AttributeError or a cryptic unpack error;
    every malformed input now raises the same clear ValueError, which is what
    callers already handle.

    Raises:
        ValueError: if the input is not a valid HH:MM time.
    """
    try:
        h_str, m_str = hhmm.strip().split(":")
        h = int(h_str)
        m = int(m_str)
    except (AttributeError, ValueError):
        raise ValueError("Time must be HH:MM (24h).") from None
    if not (0 <= h <= 23 and 0 <= m <= 59):
        raise ValueError("Time must be HH:MM (24h).")
    return h, m
def fmt_hhmm(h: int, m: int) -> str:
    """Render hour/minute as a zero-padded 24h 'HH:MM' string."""
    return "{:02d}:{:02d}".format(h, m)
# Meal ideas, workouts, etc.
# Diet styles offered in the UI; each must have a matching key in MEAL_IDEAS
# and in shopping_list()'s extras.
DIET_STYLES = ["Mediterranean", "Omnivore", "Vegetarian", "Vegan", "Low-carb"]
# Seven meal suggestions per diet style. weekly_plan() indexes these modulo
# the list length, so any list size works, but 7 gives one idea per weekday.
MEAL_IDEAS = {
    "Mediterranean": [
        "Oats + dates + walnuts + olive oil",
        "Grilled fish, lentil salad, greens",
        "Hummus, wholegrain pita, veggies",
        "Chickpea tomato stew",
        "Feta & olive salad, quinoa",
        "Shakshuka + side salad",
        "Lentils, roasted veg, tahini",
    ],
    "Omnivore": [
        "Yogurt + berries + nuts",
        "Chicken bowl (rice, veg, olive oil)",
        "Eggs, avocado, sourdough",
        "Salmon, quinoa, asparagus",
        "Lean beef, sweet potato, salad",
        "Tuna whole-grain wrap",
        "Cottage cheese + fruit + seeds",
    ],
    "Vegetarian": [
        "Tofu scramble, toast, avocado",
        "Paneer tikka bowl",
        "Bean chili + brown rice",
        "Halloumi, couscous, veg",
        "Greek salad + eggs",
        "Tempeh stir-fry",
        "Yogurt parfait + granola",
    ],
    "Vegan": [
        "Tofu scramble, avocado toast",
        "Lentil curry + basmati",
        "Burrito bowl (beans, corn, salsa)",
        "Seitan, roasted potatoes, veg",
        "Tofu poke bowl",
        "Chickpea pasta + marinara",
        "Overnight oats + banana + PB",
    ],
    "Low-carb": [
        "Eggs, smoked salmon, salad",
        "Chicken Caesar (no croutons)",
        "Beef & greens stir-fry",
        "Omelette + veg + cheese",
        "Zoodles + turkey bolognese",
        "Tofu salad w/ tahini",
        "Yogurt + nuts (moderate)",
    ],
}
# Weekly training templates keyed by goal; keys must match GOAL_CAL_ADJ,
# since predict_and_plan() looks up WORKOUTS[goal] directly.
WORKOUTS = {
    "Fat loss": [
        "3× LISS cardio 30–40min",
        "2× full-body strength 45min",
        "1× intervals 12–16min",
        "Daily 8–10k steps",
    ],
    "Recomp/Maintenance": [
        "3× full-body strength 45–60min",
        "1–2× LISS cardio 30min",
        "Mobility 10min daily",
        "8–10k steps",
    ],
    "Muscle gain": [
        "4× strength split 45–60min",
        "Optional 1× LISS 20–30min",
        "Mobility 10min",
        "7–9k steps",
    ],
}
def feeding_schedule(first_meal_hhmm: str, fasting_hours: float) -> List[Tuple[str, str]]:
    """Return 7 (start, end) feeding windows as 'HH:MM' strings, one per day.

    The window is identical every day, so it is computed once and replicated
    (the original recomputed the same values on every loop iteration).
    Fasting of 24h or more yields a zero-length window (end == start); the
    end time wraps past midnight when the window crosses it.
    """
    h, m = parse_hhmm(first_meal_hhmm)
    # Eating window is whatever remains of the day after fasting, never negative.
    window_hours = max(0.0, 24 - float(fasting_hours))
    start_minutes = h * 60 + m
    end_minutes = int((start_minutes + window_hours * 60) % (24 * 60))
    start = fmt_hhmm(h, m)
    end = fmt_hhmm(end_minutes // 60, end_minutes % 60)
    return [(start, end)] * 7
def weekly_plan(diet: str, sched: List[Tuple[str, str]], kcal: int, protein_g: int) -> pd.DataFrame:
    """Build the 7-row example-week table shown under the plan.

    Meals rotate through the diet's idea list; meal 2 is offset by 3 so the
    two meals of a given day differ.
    """
    ideas = MEAL_IDEAS[diet]
    day_names = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    rows = [
        {
            "Day": day_names[i],
            "Feeding window": f"{sched[i][0]}–{sched[i][1]}",
            "Meal 1": ideas[i % len(ideas)],
            "Meal 2": ideas[(i + 3) % len(ideas)],
            "Protein target": f"≥ {protein_g} g",
            "Daily kcal": kcal,
            "Snack": "Fruit or nuts (optional)",
        }
        for i in range(7)
    ]
    return pd.DataFrame(rows)
def shopping_list(diet: str) -> List[str]:
    """Return pantry staples plus diet-specific extras (3 core + 3 extras)."""
    extras_by_diet = {
        "Omnivore": [
            "Chicken, fish, eggs, yogurt, cottage cheese",
            "Rice/quinoa/sourdough",
            "Beans/lentils",
        ],
        "Mediterranean": [
            "Fish, feta, olives",
            "Whole grains (bulgur, farro)",
            "Chickpeas/lentils",
        ],
        "Vegetarian": ["Eggs, dairy, paneer", "Legumes", "Tofu/tempeh"],
        "Vegan": ["Tofu/tempeh/seitan", "Beans/lentils", "Plant yogurt/milk"],
        "Low-carb": ["Eggs, fish, meat", "Green veg", "Greek yogurt, cheese"],
    }
    staples = [
        "Leafy greens, mixed veg, berries",
        "Olive oil, nuts/seeds, herbs & spices",
        "Coffee/tea, mineral water, electrolytes",
    ]
    return staples + extras_by_diet[diet]
# ------------------------
# Plan builder (with SOTA + local fallback)
# ------------------------
def predict_and_plan(
    fasting_duration,
    meal_timing,
    weight,
    age,
    gender,
    height,
    activity,
    goal,
    diet,
    lang,
    use_sota_model,
    sota_model_id,
) -> Tuple[Optional[float], str, str, pd.DataFrame, object, str]:
    """Validate inputs, predict a score, and assemble the full coaching plan.

    Returns (score, KPI markdown, plan markdown, weekly table, plotly figure,
    download path). On any error returns (None, "", warning text, empty
    DataFrame, None, "") so every Gradio output stays well-formed.
    """
    try:
        # --- Input validation ---
        if fasting_duration < 0 or fasting_duration > 72:
            raise ValueError("Fasting must be 0–72h.")
        h, m = parse_hhmm(meal_timing)
        if weight <= 0 or height <= 0 or age < 0:
            raise ValueError("Invalid weight/height/age.")
        # Predict score
        # Single-row frame for the regressor. NOTE(review): column names and
        # the one-hot gender columns are assumed to match the training
        # pipeline of decision_tree_regressor.joblib — confirm against the
        # training code.
        df = pd.DataFrame(
            {
                "Fasting Duration (hours)": [float(fasting_duration)],
                "Meal Timing (hour:minute)": [h + m / 60],
                "Body Weight (kg)": [float(weight)],
                "Age (years)": [float(age)],
                "Height (cm)": [float(height)],
                "Gender_Male": [1 if gender == "Male" else 0],
                "Gender_Other": [1 if gender == "Other" else 0],
            }
        )
        score = float(decision_tree_regressor.predict(df)[0])
        # Metrics
        bmr = bmr_mifflin(gender, weight, height, age)
        tdee_kcal = tdee(bmr, activity)
        target_kcal = int(round(tdee_kcal * (1 + GOAL_CAL_ADJ[goal])))
        # 1.6 g protein per kg body weight, floored at 80 g.
        protein_g = int(round(max(1.6 * weight, 80)))
        bmi_val = round(bmi(weight, height), 1)
        # Schedule, plan table, chart
        sched = feeding_schedule(meal_timing, float(fasting_duration))
        plan_df = weekly_plan(diet, sched, target_kcal, protein_g)
        # Bars in minutes-since-midnight: base = window start, length = window size.
        chart_df = pd.DataFrame(
            {
                "Day": ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
                "start": [int(s.split(":")[0]) * 60 + int(s.split(":")[1]) for s, _ in sched],
                "length": [max(0, int((24 - float(fasting_duration)) * 60))] * 7,
            }
        )
        fig = px.bar(
            chart_df,
            y="Day",
            x="length",
            base="start",
            orientation="h",
            title="Feeding window each day (minutes)",
        )
        # X axis fixed to the full day (0–1440 minutes) with 6h tick marks.
        fig.update_layout(
            xaxis=dict(
                range=[0, 1440],
                tickvals=[0, 360, 720, 1080, 1440],
                ticktext=["00:00", "06:00", "12:00", "18:00", "24:00"],
            ),
            height=300,
            margin=dict(l=10, r=10, t=40, b=10),
        )
        # Base markdown (deterministic, structured). Optionally enhance with SOTA.
        kpis = (
            f"**Score:** {score:.1f} • **BMI:** {bmi_val} • **BMR:** {int(bmr)} kcal • "
            f"**TDEE:** {int(tdee_kcal)} kcal • **Target:** {target_kcal} kcal • **Protein:** ≥ {protein_g} g • "
            f"**Diet:** {diet}"
        )
        sched_md = "\n".join(
            [
                f"- **{d}**: {s} – {e}"
                for d, (s, e) in zip(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], sched)
            ]
        )
        workouts_md = "\n".join([f"- {w}" for w in WORKOUTS[goal]])
        shop_md = "\n".join([f"- {x}" for x in shopping_list(diet)])
        base_plan_md = f"""
## Your 7-day intermittent fasting plan
{kpis}
### Feeding window (daily)
{sched_md}
### Weekly training
{workouts_md}
### Daily meals (example week)
(See the table below.)
### Shopping list
{shop_md}
> Hydration & electrolytes during the fast, protein at each meal, whole foods, and 7–9 hours sleep.
""".strip()
        # Enhance/format with chosen generator
        if use_sota_model:
            if not get_hf_api_key():
                # Friendly guidance + fallback (note: `lang` is only honored
                # on the SOTA path below, not on the local fallback).
                plan_md = (
                    "⚠️ **HF Inference is enabled but no token was found.**\n\n"
                    "Add a Space secret named `HF_API_KEY` (or `HF_TOKEN`) in **Settings → Repository secrets**, "
                    "or uncheck **Use SOTA model** to use the local fallback.\n\n"
                    + generate_on_gpu(
                        "Rewrite in a friendly coaching tone; keep markdown structure; do not remove tables or metrics.\n\n"
                        + base_plan_md,
                        max_new_tokens=700,
                    )
                )
            else:
                plan_md = generate_with_hf_inference(
                    prompt=(
                        "You are an expert health coach. Refine the following intermittent fasting plan. "
                        "Keep markdown headings and bullets; be concise and specific; keep the meaning. "
                        f"Language: '{lang}'.\n\n{base_plan_md}"
                    ),
                    model_id=sota_model_id,
                    max_new_tokens=900,
                )
        else:
            # Local tiny model inside ZeroGPU window (or CPU fallback)
            plan_md = generate_on_gpu(
                "Rewrite in a friendly coaching tone; keep markdown structure; do not remove tables or metrics.\n\n"
                + base_plan_md,
                max_new_tokens=700,
            )
        # Save for download (overwrites the previous plan; single shared file).
        md_path = DATA_DIR / "plan.md"
        md_path.write_text(plan_md, encoding="utf-8")
        return score, kpis, plan_md, plan_df, fig, str(md_path)
    except Exception as e:
        # Broad catch keeps the UI responsive: any failure becomes a warning
        # message rather than a traceback in the Space.
        return None, "", f"⚠️ {e}", pd.DataFrame(), None, ""
# ------------------------
# Tracker logic
# ------------------------
# In-memory map username -> start timestamp of the currently running fast.
# Lives only for the process lifetime; finished fasts are persisted to CSV.
active_fasts: Dict[str, pd.Timestamp] = {}
def _csv(user: str) -> Path:
    """Per-user history CSV path with the username sanitized for filesystems.

    Only alphanumerics, '_' and '-' are kept. A username consisting solely of
    other characters (e.g. '!!!') used to sanitize to '' and produce the
    hidden shared file '.csv'; it now falls back to 'default', matching the
    behavior of an empty username.
    """
    safe = "".join(ch for ch in (user or "default") if ch.isalnum() or ch in ("_", "-"))
    return DATA_DIR / f"{safe or 'default'}.csv"
def hist_load(user: str) -> pd.DataFrame:
    """Load a user's fasting history, coercing timestamp columns to datetimes.

    Returns an empty frame with the canonical columns when no CSV exists yet.
    """
    path = _csv(user)
    if not path.exists():
        return pd.DataFrame(columns=["start_time", "end_time", "duration_hours", "note"])
    history = pd.read_csv(path)
    for col in ("start_time", "end_time"):
        if col in history:
            history[col] = pd.to_datetime(history[col], errors="coerce")
    return history
def hist_save(user: str, d: pd.DataFrame):
    """Persist the user's history to its CSV without the index column."""
    destination = _csv(user)
    d.to_csv(destination, index=False)
def make_hist_chart(df: pd.DataFrame):
    """Bar chart of fast duration per day, or None when nothing is completed."""
    if df.empty:
        return None
    finished = df.dropna(subset=["end_time"]).copy()
    if finished.empty:
        return None
    finished["date"] = pd.to_datetime(finished["end_time"]).dt.date
    chart = px.bar(finished, x="date", y="duration_hours", title="Fasting duration by day (h)")
    chart.update_layout(height=300, margin=dict(l=10, r=10, t=40, b=10))
    return chart
def compute_streak(df: pd.DataFrame) -> int:
    """Count consecutive calendar days, ending today, with a completed fast."""
    completed = df.dropna(subset=["end_time"])
    if completed.empty:
        return 0
    fast_days = set(pd.to_datetime(completed["end_time"]).dt.date)
    streak = 0
    day = pd.Timestamp.now().date()
    # Walk backwards one day at a time until a gap appears.
    while day in fast_days:
        streak += 1
        day = day - pd.Timedelta(days=1)
    return streak
def hist_stats(df: pd.DataFrame) -> str:
    """Summary text: total fasts, average of the last 7, and current streak."""
    if df.empty:
        return "No history yet."
    recent_avg = df.tail(7)["duration_hours"].mean()
    return (
        f"Total fasts: {len(df)}\n"
        f"Avg (last 7): {recent_avg:.2f} h\n"
        f"Current streak: {compute_streak(df)} day(s)"
    )
def start_fast(user: str, note: str):
    """Record a fast start for `user`; refuses if one is already running.

    `note` is wired from the UI but not used here. Returns (status text, None)
    to match the two Gradio outputs.
    """
    if not user:
        return "Enter username in Tracker.", None
    existing = active_fasts.get(user)
    if existing is not None:
        return f"Already fasting since {existing.strftime(TS_FMT)}.", None
    started = pd.Timestamp.now()
    active_fasts[user] = started
    return f"✅ Fast started at {started.strftime(TS_FMT)}.", None
def end_fast(user: str):
    """Finish the active fast, append it to history, and refresh the views."""
    if not user:
        return "Enter username.", None, None, None
    if user not in active_fasts:
        return "No active fast.", None, None, None
    finished = pd.Timestamp.now()
    started = active_fasts.pop(user)
    hours = round((finished - started).total_seconds() / 3600, 2)
    history = hist_load(user)
    # Note text is not captured at end time; stored as an empty string.
    history.loc[len(history)] = [started, finished, hours, ""]
    hist_save(user, history)
    return (
        f"✅ Fast ended at {finished.strftime(TS_FMT)} • {hours} h",
        history.tail(12),
        make_hist_chart(history),
        hist_stats(history),
    )
def refresh_hist(user: str):
    """Reload history and return (last 12 rows, chart, stats text)."""
    history = hist_load(user)
    return history.tail(12), make_hist_chart(history), hist_stats(history)
# ------------------------
# UI
# ------------------------
def hf_status_md() -> str:
    """Markdown banner indicating whether an HF token is configured."""
    if get_hf_api_key():
        return "✅ **HF API key detected** (SOTA inference will work)."
    return "⚠️ **HF API key not detected.** Add a Space secret named `HF_API_KEY` (or `HF_TOKEN`) to enable SOTA inference."
with gr.Blocks(
    title="Intermittent Fasting Coach — Pro (SOTA)",
    theme=gr.themes.Soft(
        primary_hue=gr.themes.colors.orange,
        neutral_hue=gr.themes.colors.gray,
    ),
) as demo:
    gr.Markdown(
        """
# 🥣 Intermittent Fasting — Pro (SOTA)
Detailed coaching plans + tracker. ZeroGPU-ready (with CPU fallback). Data stored locally in this Space.
"""
    )
    with gr.Tabs():
        # --- Coach tab: inputs -> predict_and_plan -> plan outputs
        with gr.TabItem("Coach"):
            # Populated on load by the demo.load(hf_status_md, ...) at the bottom.
            hf_status = gr.Markdown()
            with gr.Row():
                with gr.Column():
                    fasting_duration = gr.Number(
                        label="Fasting duration (h)", value=16, minimum=0, maximum=72, step=0.5
                    )
                    meal_timing = gr.Textbox(label="First meal time (HH:MM)", value="12:30")
                    weight = gr.Number(label="Body weight (kg)", value=70, step=0.5)
                with gr.Column():
                    age = gr.Slider(label="Age (years)", minimum=18, maximum=100, value=35)
                    gender = gr.Radio(["Male", "Female", "Other"], label="Gender", value="Male")
                    height = gr.Number(label="Height (cm)", value=175)
            with gr.Row():
                activity = gr.Dropdown(choices=list(ACTIVITY.keys()), value="Lightly active", label="Activity")
                goal = gr.Dropdown(choices=list(GOAL_CAL_ADJ.keys()), value="Recomp/Maintenance", label="Goal")
                diet = gr.Dropdown(choices=DIET_STYLES, value="Mediterranean", label="Diet style")
            lang = gr.Radio(["en", "es"], value="en", label="Language")
            use_sota_model = gr.Checkbox(value=True, label="Use SOTA model (HF Inference)")
            sota_model_id = gr.Dropdown(choices=SOTA_MODELS, value=SOTA_MODELS[0], label="HF model")
            btn = gr.Button("Predict & Build Plan", variant="primary")
            # Outputs, in the same order predict_and_plan returns them.
            score_out = gr.Number(label="Predicted score")
            kpi_out = gr.Markdown()
            plan_md = gr.Markdown()
            plan_tbl = gr.Dataframe(
                headers=["Day", "Feeding window", "Meal 1", "Meal 2", "Protein target", "Daily kcal", "Snack"],
                interactive=False,
            )
            fig = gr.Plot()
            dl = gr.DownloadButton(label="Download plan (.md)")
            btn.click(
                predict_and_plan,
                inputs=[
                    fasting_duration,
                    meal_timing,
                    weight,
                    age,
                    gender,
                    height,
                    activity,
                    goal,
                    diet,
                    lang,
                    use_sota_model,
                    sota_model_id,
                ],
                outputs=[score_out, kpi_out, plan_md, plan_tbl, fig, dl],
                api_name="coach_plan",
            )
        # --- Tracker tab: start/end fasts, per-user CSV-backed history
        with gr.TabItem("Tracker"):
            with gr.Row():
                user = gr.Textbox(label="Username", value="")
                note = gr.Textbox(label="Note (optional)")
            with gr.Row():
                b1 = gr.Button("Start fast", variant="primary")
                b2 = gr.Button("End fast")
                b3 = gr.Button("Reload history")
            status = gr.Markdown("Not fasting.")
            hist = gr.Dataframe(interactive=False)
            hist_fig = gr.Plot()
            stats = gr.Markdown()
            b1.click(start_fast, inputs=[user, note], outputs=[status, note])
            b2.click(end_fast, inputs=[user], outputs=[status, hist, hist_fig, stats])
            b3.click(refresh_hist, inputs=[user], outputs=[hist, hist_fig, stats])
            # Populate history on page load as well as via the reload button.
            demo.load(refresh_hist, inputs=[user], outputs=[hist, hist_fig, stats])
        # --- About tab: static help text
        with gr.TabItem("About"):
            gr.Markdown(
                """
**How it works**
• The predictor estimates a health score from inputs.
• The coach builds a 7-day schedule matching your fasting window, goal, activity and diet style.
• SOTA option uses Hugging Face Inference; fallback uses a tiny local model in the ZeroGPU window.
• Tracker stores CSVs under `/data/` and never sends data elsewhere.
**Enable SOTA inference**
Add a Space secret named `HF_API_KEY` (recommended) or `HF_TOKEN` in **Settings → Repository secrets**.
"""
            )
    # Show whether token is detected (does not reveal the token)
    demo.load(hf_status_md, outputs=[hf_status])
if __name__ == "__main__":
    # Enable Gradio's request queue before launching the Space.
    demo.queue().launch()