# MediSynapse / app.py
# Author: Haseeb-001 — commit e63f0fd ("Update app.py"), copied from the
# Hugging Face Space file viewer (the viewer chrome was pasted in as bare
# text and has been converted to comments so the module parses).
import streamlit as st
from ctransformers import AutoModelForCausalLM
from datetime import datetime
# Configuration
MODEL_PATH = "llama-3.1-medpalm2-imitate-8b-instruct.Q4_K_M.gguf"

# UI color palette (hex) consumed by the CSS injected further down.
THEME_CONFIG = dict(
    primary="#2563EB",
    background="#0F172A",
    text="#F8FAFC",
)

# The chat transcript must survive Streamlit reruns, so it lives in
# session state; initialize it exactly once per browser session.
if "history" not in st.session_state:
    st.session_state.history = []
@st.cache_resource(show_spinner=False)
def load_model():
    """Load the local GGUF llama model once and cache it for the process.

    ``st.cache_resource`` guarantees the (expensive) load runs a single
    time per server, not on every script rerun.
    """
    load_options = {
        "model_type": "llama",
        "gpu_layers": 40,        # layers offloaded to GPU; the rest run on CPU
        "context_length": 2048,  # prompt + completion token window
    }
    return AutoModelForCausalLM.from_pretrained(MODEL_PATH, **load_options)
def medical_prompt(query):
    """Wrap *query* in the model's ChatML-style chat template.

    The system turn pins the required answer structure and the 300-word
    limit; the user's text is inserted verbatim as the user turn, and the
    template ends with an open assistant turn for the model to complete.
    (An f-string is used rather than ``str.format`` so braces in the
    user's text pass through untouched.)
    """
    prompt = f"""<|im_start|>system
You are a medical expert AI. Provide accurate, compassionate responses.
Follow this structure:
1. Analyze symptoms
2. List possible conditions (with likelihood)
3. Recommend immediate actions
4. Suggest diagnostic tests
5. Prevention tips
6. Ask follow-up questions
Keep responses under 300 words.<|im_end|>
<|im_start|>user
{query}<|im_end|>
<|im_start|>assistant
"""
    return prompt
def generate_response(query):
    """Run the user's query through the local LLM and clean the output.

    Args:
        query: Free-text symptom description typed by the user.

    Returns:
        The model's reply with the ``<|im_end|>`` stop token removed and
        surrounding whitespace stripped, or a user-visible
        ``"⚠️ Error: ..."`` string if generation fails for any reason.
    """
    try:
        raw = model(
            medical_prompt(query),
            temperature=0.7,
            top_p=0.85,
            max_new_tokens=400,
        )
        # Remove the stop token BEFORE stripping: the original stripped
        # first, which left trailing whitespace whenever whitespace
        # followed "<|im_end|>" in the raw completion.
        return raw.replace("<|im_end|>", "").strip()
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, and any
        # failure should surface as a chat message rather than a crash.
        return f"⚠️ Error: {str(e)}"
# UI Setup — page-level configuration must be the first Streamlit call
# that renders anything.
page_settings = {
    "page_title": "MedPalm AI",
    "page_icon": "🧠",
    "layout": "wide",
}
st.set_page_config(**page_settings)
# Inject the dark-theme CSS. Inside an f-string a literal CSS brace must
# be doubled ("{{" / "}}"); the original `body` rule ended with a single
# "}", which is a SyntaxError ("f-string: single '}' is not allowed") and
# prevented the whole module from compiling. Fixed by doubling it.
st.markdown(f"""
<style>
body {{background: {THEME_CONFIG['background']}; color: {THEME_CONFIG['text']}}}
.stChatMessage {{border-radius: 15px; padding: 1.5rem; margin: 1rem 0}}
.stChatInput {{position: fixed; bottom: 20px; width: 65%;}}
@keyframes pulse {{0% {{opacity:1}} 50% {{opacity:0.5}} 100% {{opacity:1}}}}
</style>
""", unsafe_allow_html=True)
# Main Chat — page header shown above the conversation.
st.title("⚕️ MedPalm AI Doctor")
st.caption("Instant Medical Analysis & Symptom Checker")
# Resolve the cached model handle (first call actually loads the GGUF file).
model = load_model()
# Replay the stored transcript so the conversation survives reruns.
# (`st.chat_message(...)` returns a container; calling `.markdown` on it
# directly is equivalent to the `with` form.)
for turn in st.session_state.history:
    st.chat_message("user").markdown(turn["query"])
    st.chat_message("bot").markdown(turn["response"])
# Handle a new user message: generate a reply, persist the turn, and
# rerun so the replay loop above renders the updated transcript.
user_query = st.chat_input("Describe your symptoms...")
if user_query:
    with st.spinner("🔍 Analyzing..."):
        answer = generate_response(user_query)
        st.session_state.history.append(
            {
                "query": user_query,
                "response": answer,
                "time": datetime.now().strftime("%H:%M:%S"),
            }
        )
        st.rerun()