Spaces:
Sleeping
Sleeping
File size: 2,679 Bytes
dea06d0 f054618 dea06d0 f054618 dea06d0 a44e524 dea06d0 a44e524 dea06d0 a44e524 dea06d0 f054618 dea06d0 68296f4 dea06d0 68296f4 dea06d0 68296f4 dea06d0 f054618 dea06d0 f054618 dea06d0 f054618 dea06d0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import os
import streamlit as st
from groq import Groq
# --- Environment & API setup -------------------------------------------------

# Silence TensorFlow oneDNN CPU warnings (must be set before TF is imported).
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

# SECURITY: an API key was hardcoded here and has been committed/published —
# it must be considered compromised and rotated. Prefer the environment (or
# st.secrets) and keep the literal only as a legacy fallback.
GROQ_API_KEY = os.environ.get(
    "GROQ_API_KEY",
    "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",
)
os.environ["GROQ_API_KEY"] = GROQ_API_KEY

# Initialize the Groq client (pass the key explicitly; avoids 'proxies' issue).
client = Groq(api_key=GROQ_API_KEY)
# --- Streamlit page & sidebar ------------------------------------------------

# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="AI Disease Detection Assistant", page_icon="π©Ί", layout="wide")
st.title("π©Ί AI Disease Detection Chatbot")
st.write("Hello! I'm your AI assistant for disease-related queries. Ask me about symptoms, treatments, or general health advice.")

# Sidebar: theme selector (keys must match the `themes` dict defined below).
st.sidebar.header("βοΈ Settings")
chat_theme = st.sidebar.radio("Choose a theme:", ["Light", "Dark", "Blue", "Green"])
# --- Theme styling -----------------------------------------------------------

# Background color per theme; keys mirror the sidebar radio options above.
themes = {
    "Dark": "#1e1e1e",
    "Blue": "#e3f2fd",
    "Green": "#e8f5e9",
    "Light": "#ffffff",
}

# Inject CSS for the chosen theme, buttons, and chat bubbles.
# NOTE: the hardcoded black text may be unreadable on the Dark background.
st.markdown(f"""
    <style>
    body {{ background-color: {themes[chat_theme]}; color: black; }}
    .stButton>button {{ background-color: #4CAF50; color: white; }}
    .chat-bubble {{ background-color: #f1f1f1; border-radius: 10px; padding: 10px; }}
    </style>
""", unsafe_allow_html=True)
# --- Session state -----------------------------------------------------------

# Persist chat history across Streamlit reruns; initialize once per session.
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = []
# --- Response generation -----------------------------------------------------

def generate_chatbot_response(user_message):
    """Return the chatbot's reply to *user_message*.

    Handles one hardcoded "who created you" easter egg locally; every other
    message is forwarded to the Groq llama3-8b-8192 model. API failures are
    reported as an error string rather than raised, so the UI never crashes.
    """
    # Custom canned response (case-insensitive substring match).
    if "who created you" in user_message.lower():
        return "I was created by Abdel Basit. π"

    # Wrap the user's question in a medical-assistant prompt.
    # NOTE(review): user text is interpolated directly into the prompt, so it
    # can override these instructions (prompt injection) — acceptable here.
    prompt = f"You are a medical AI assistant. The user asks: {user_message}. Provide a detailed, accurate medical response."
    try:
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama3-8b-8192",
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Surface API/network failures as a chat message instead of crashing.
        return f"β οΈ Error: {str(e)}"
# --- User input --------------------------------------------------------------

st.markdown("### π¬ Chat with me")
user_input = st.chat_input("Ask me a health-related question:")
# chat_input returns None until the user submits; skip empty submissions too.
if user_input:
    chatbot_response = generate_chatbot_response(user_input)
    st.session_state.conversation_history.append(("User: " + user_input, "Chatbot: " + chatbot_response))
# --- Chat history ------------------------------------------------------------

st.markdown("---")
st.markdown("### π¨οΈ Chat History")
# Render each (question, answer) pair as styled chat bubbles.
# SECURITY NOTE: unsafe_allow_html=True renders raw user/model text as HTML,
# which allows markup/script injection into the page — consider escaping.
for question, answer in st.session_state.conversation_history:
    st.write(f"<div class='chat-bubble'><b>{question}</b></div>", unsafe_allow_html=True)
    st.write(f"<div class='chat-bubble'>{answer}</div>", unsafe_allow_html=True)