import streamlit as st
import pandas as pd
import numpy as np
from openai import OpenAI
import json
import utills
import joblib
from huggingface_hub import hf_hub_download
import os
# Feature -> widget configuration that drives the patient input form.
with open('features.json') as fp:
    col_map = json.load(fp)

print("Initializing Streamlit UI...")
utills.streamlit_layout()
utills.css_markdown()
# Load trained model and preprocessing tools
print("Load trained model and preprocessing tools...")
@st.cache_resource
def load_model():
    """Fetch the trained random-forest model from the Hugging Face Hub
    and deserialize it with joblib.

    Streamlit's resource cache ensures the download and unpickling happen
    only once per server process, not on every rerun.
    """
    local_path = hf_hub_download(
        repo_id="Dwaipayan08/random_forest_clinical_diabetes",
        filename="rf_model.pkl",
    )
    return joblib.load(local_path)
# Model comes from the Hugging Face Hub via load_model(); the preprocessing
# artifacts are deserialized from local joblib files shipped with the app.
model = load_model()
scaler = joblib.load("scaler.pkl")                # fitted scaler applied before predict — presumably the one used at training time; verify
feature_names = joblib.load("feature_names.pkl")  # column order the model expects
model_details = joblib.load("model_details.pkl")  # model metadata (loaded but not otherwise read in this file)
# ----------------- Streamlit UI -----------------------
tab1, tab2, tab3 = st.tabs(["π Input Form", "π Results", "Model Specifications"])

with tab1:
    st.subheader("Fill Patient Information")

    # Spread the form widgets over three columns; utills builds one widget
    # per feature (driven by col_map) and returns a single-row DataFrame.
    col1, col2, col3 = st.columns(3)
    col_list = [col1, col2, col3]
    user_input = utills.get_user_input(feature_names, col_map, col_list)

    if st.button("π Predict Diabetes Risk"):
        with col2:
            # Scale the raw inputs with the same transform used in training,
            # then predict the class and the positive-class probability.
            input_scaled = scaler.transform(user_input)
            prediction = model.predict(input_scaled)[0]
            prob = model.predict_proba(input_scaled)[0][1]

            # Persist results in session state so the Results tab can
            # render them after Streamlit's rerun.
            st.session_state["prediction"] = prediction
            st.session_state["prob"] = prob
            st.session_state["input"] = user_input.to_dict()

            st.markdown("User Input: ")
            st.table(user_input)
with tab2:
    if "prediction" in st.session_state:
        st.subheader("π Prediction Result")
        st.markdown(f"### {'π₯ Diabetes Risk Detected' if st.session_state['prediction'] else 'π© No Diabetes Risk'}")
        st.write(f"**Risk Level**: {st.session_state['prob']*100:.2f}%")

        # ---- AI-generated health suggestions via the OpenAI Responses API ----
        client = OpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
        )
        prompt = f"""
        Based on the below user data: Give some medical health advice related to diabetes and the measures this person should take
        to prevent diabetes in later life. \n
        Patient data: {st.session_state["input"]} \n
        Context: Based on a random forest classifier model,
        the probability of this user getting diabetes in later life came out to be {st.session_state['prob']}. \n
        If diabetic risk is high, provide 3 practical suggestions. Keep the tone supportive and non-clinical.
        """
        try:
            response = client.responses.create(
                model="gpt-4o",
                instructions="You are a medical expert consulting a user.",
                input=prompt,
                temperature=0.7,
                max_output_tokens=400,
            )
            suggestion = response.output_text
            st.subheader("π‘ AI Health Suggestions")
            st.write(suggestion)
        except Exception as e:
            # Surface API failures (missing key, network, quota) to the user
            # instead of crashing the app.
            st.error(f"OpenAI API Error: {e}")
        st.warning("Note: Results are not a substitute for medical advice.")
    else:
        st.info("Run a prediction from the 'Input Form' tab.")
with tab3:
    st.subheader("Below are the specifications of the backend model")
    # Hard-coded evaluation metrics for the deployed random forest.
    # NOTE(review): model_details is loaded above but not used here — these
    # figures are typed in by hand; confirm they match the shipped model.
    st.markdown(f"""
    Model Used: Random Forest \n
    Accuracy of the model: 86.6% \n
    Recall score of the model: 88.5% \n
    F1 Score: 68.2% \n """)