File size: 3,506 Bytes
509ebcd 559c176 509ebcd 559c176 c2781c8 559c176 c2781c8 559c176 c2781c8 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 509ebcd 559c176 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 |
import streamlit as st
import pandas as pd
import joblib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from langchain_google_genai import GoogleGenerativeAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# Streamlit page config (must be the first Streamlit command in the script)
st.set_page_config(page_title="Interactive Sleep Predictor", layout="wide")

# UI title and intro blurb
st.title("⏰ Interactive Sleep & Health Predictor")
st.markdown("Track your sleep, activity & get personalized health + fitness advice with Gemini 🧠💪")
# Load model
@st.cache_resource
def load_model():
    """Load the trained sleep-state classifier once and cache it for the session."""
    model_path = "log_reg_model.pkl"  # update if your model is stored elsewhere
    return joblib.load(model_path)
# Instantiate the cached sleep-state classifier
model = load_model()

# LangChain / Gemini setup - the API key is read from Streamlit secrets
# (.streamlit/secrets.toml). Fail fast with a clear message instead of
# passing None to GoogleGenerativeAI and erroring opaquely at request time.
api_key = st.secrets.get('genai_key')
if not api_key:
    st.error("No `genai_key` found in Streamlit secrets. Add it to .streamlit/secrets.toml.")
    st.stop()
llm = GoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=api_key)
# LangChain prompt template. The {sleep_duration}/{step_count}/{state}
# placeholders must match the input_variables passed to PromptTemplate in
# generate_personalized_insights.
# NOTE(review): "π€" below is mojibake in the source (likely an emoji,
# e.g. a robot face) - left byte-identical; confirm the intended glyph.
prompt_template = """
You are a certified health and fitness advisor.
A user has recorded:
- Sleep Duration: {sleep_duration} hours
- Step Count: {step_count} steps
- Current State: {state} (awake or asleep)
Based on these values:
1. Give a personalized health and wellness suggestion (max 5 lines).
2. Give specific exercise tips suitable for their state and activity level (step count).
3. Mention if their step count is low/average/high and whether they should increase activity.
Start with "π€ Summary for the User:" and then provide your insights.
"""
# Chain to generate Gemini suggestions
def generate_personalized_insights(sleep_duration, step_count, state):
    """Run the Gemini chain over the module-level prompt template.

    Fills the template with the user's sleep duration (hours), step count
    and predicted "awake"/"asleep" state, and returns the LLM's advice text.
    """
    template = PromptTemplate(
        template=prompt_template,
        input_variables=["sleep_duration", "step_count", "state"],
    )
    gemini_chain = LLMChain(llm=llm, prompt=template)
    chain_inputs = {
        "sleep_duration": sleep_duration,
        "step_count": step_count,
        "state": state,
    }
    return gemini_chain.run(chain_inputs)
# User form
with st.form("predictor_form"):  # form batches the widgets; the app reruns only on submit
    # NOTE(review): the widget-label emoji below are mojibake in the source
    # (UTF-8 shown as ISO-8859-7) and cannot all be decoded unambiguously -
    # left byte-identical; confirm the intended glyphs.
    step = st.number_input("πΆ Step Count (today)", min_value=0, step=10)
    hour = st.slider("β° Hour of the Day", min_value=0, max_value=23)
    col1, col2 = st.columns(2)  # sleep/wake time pickers side by side
    with col1:
        sleep_time = st.time_input("π Sleep Onset Time")
    with col2:
        wake_time = st.time_input("π Wake-Up Time")
    submit_button = st.form_submit_button("Predict & Get Gemini Tips")
# On Submit
def _sleep_hours(sleep_time, wake_time):
    """Hours between *sleep_time* and *wake_time* (datetime.time values), to 2 dp.

    A wake-up earlier than the onset is treated as crossing midnight, so the
    result is always in [0, 24); identical times yield 0.0.
    """
    day = datetime.today()
    start = datetime.combine(day, sleep_time)
    end = datetime.combine(day, wake_time)
    if end < start:  # slept past midnight - roll wake-up to the next day
        end += timedelta(days=1)
    # total_seconds() rather than .seconds: robust even if the delta ever
    # carried a days component.
    return round((end - start).total_seconds() / 3600, 2)

if submit_button:
    # Predict sleep state with the classifier (0 = awake, 1 = asleep)
    input_df = pd.DataFrame([[step, hour]], columns=["step", "hour"])
    prediction = model.predict(input_df)[0]
    state = "asleep" if prediction == 1 else "awake"
    # NOTE(review): the awake glyph "π" is mojibake in the source and cannot
    # be decoded unambiguously - left as-is; confirm the intended emoji.
    emoji = "😴" if state == "asleep" else "π"

    sleep_duration = _sleep_hours(sleep_time, wake_time)

    # Display prediction
    st.success(f"{emoji} **You're likely {state}**. You've logged **{sleep_duration} hours** of sleep and taken **{step} steps** today.")

    # LangChain Gemini suggestions
    insights = generate_personalized_insights(sleep_duration, step, state)
    st.markdown("### 🧠 Gemini-Generated Tips:")
    st.markdown(insights)

    # Sleep visualization - one horizontal bar of logged hours
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.barh(["Your Sleep Duration"], [sleep_duration], color="skyblue")
    ax.set_xlim(0, 10)
    ax.set_xlabel("Hours")
    ax.set_title("Logged Sleep Duration")
    st.pyplot(fig)
    plt.close(fig)  # release the figure so Streamlit reruns don't accumulate memory
|