import os
import streamlit as st
# st.set_page_config must be the first Streamlit command executed in the
# script, which is why it is called here, before the remaining imports.
st.set_page_config(page_title="Medical Assistant AI", page_icon="🩺")
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
# ✅ Load the Hugging Face token from the environment and export it under the
# name langchain_huggingface actually reads (HUGGINGFACEHUB_API_TOKEN).
hf_token = os.getenv("hf")
if hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
else:
    # os.environ[...] = None raises TypeError; stop with a clear message
    # instead of crashing with an unrelated traceback.
    st.error("Missing Hugging Face token: set the 'hf' environment variable.")
    st.stop()
# ✅ Custom CSS styling — currently empty; add CSS rules inside the
# triple-quoted string below as the UI evolves.
_custom_css = """
"""
st.markdown(_custom_css, unsafe_allow_html=True)
# ✅ Initialize the LLaMA 3.1 chat model. All generation settings belong on
# the endpoint; ChatHuggingFace only wraps the llm, so duplicating repo_id /
# provider / temperature / max_new_tokens / task there was redundant (the
# wrapper derives everything it needs from the endpoint).
llama_endpoint = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    provider="novita",
    temperature=0.8,
    max_new_tokens=256,
    task="conversational",
)
llama_model = ChatHuggingFace(llm=llama_endpoint)
# ✅ Streamlit interface header. The original wrapper call contained a raw
# newline inside a single-quoted string (a SyntaxError); emit the (empty)
# HTML wrapper with a valid literal instead.
st.markdown("", unsafe_allow_html=True)
st.title("🩺 Medical Assistant (LLaMA 3.1)")
st.markdown("Ask your medical queries below and get AI-powered advice.")
# ✅ Conversation memory: seed the chat history exactly once per session with
# the assistant's system prompt (index 0 is never rendered to the user).
if "messages" not in st.session_state:
    system_prompt = SystemMessage(content="You are a highly experienced and qualified medical assistant with 10 years of experience in a busy family practice clinic. You are known for your professionalism, empathy, attention to detail, and ability to anticipate the needs of both patients and the doctor.")
    st.session_state.messages = [system_prompt]
# ✅ Input box
user_input = st.text_input("💬 You:", placeholder="E.g. What is paracetamol?", key="input")

# ✅ Handle input. st.text_input keeps its value across reruns, so without a
# guard the same message would be re-appended — and the model re-invoked —
# every time any widget triggers a rerun. Track the last processed input.
if user_input and user_input != st.session_state.get("last_input"):
    st.session_state.last_input = user_input
    st.session_state.messages.append(HumanMessage(content=user_input))
    with st.spinner("💡 Thinking..."):
        response = llama_model.invoke(st.session_state.messages)
    st.session_state.messages.append(AIMessage(content=response.content))
# ✅ Display chat history, skipping the system prompt at index 0. The original
# f-strings contained raw newlines inside single quotes (a SyntaxError);
# rebuilt here as valid literals.
for msg in st.session_state.messages[1:]:
    if isinstance(msg, HumanMessage):
        st.markdown(f"🧑‍⚕️ {msg.content}", unsafe_allow_html=True)
    elif isinstance(msg, AIMessage):
        st.markdown(f"🤖 {msg.content}", unsafe_allow_html=True)
st.markdown("", unsafe_allow_html=True)