File size: 2,679 Bytes
dea06d0
 
 
f054618
dea06d0
 
f054618
dea06d0
 
 
a44e524
dea06d0
 
a44e524
dea06d0
 
 
 
a44e524
dea06d0
 
 
f054618
dea06d0
 
 
 
 
 
 
 
 
 
 
 
 
 
68296f4
dea06d0
 
 
68296f4
dea06d0
 
 
 
 
68296f4
dea06d0
 
f054618
dea06d0
 
 
 
 
 
 
 
f054618
dea06d0
 
 
 
 
 
 
f054618
dea06d0
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import html
import os

import streamlit as st
from groq import Groq

# βœ… Fix TensorFlow CPU warnings
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

# βœ… Groq API key: read from the environment instead of hard-coding it.
# SECURITY: a previous revision committed a live API key to source control;
# that key must be treated as leaked and revoked. Never embed secrets in code.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    # Fail fast with an actionable message rather than letting the Groq
    # client raise an opaque authentication error later.
    raise RuntimeError(
        "GROQ_API_KEY environment variable is not set. "
        "Export it before launching the app."
    )

# βœ… Initialize the Groq client (Fixed 'proxies' issue)
client = Groq(api_key=GROQ_API_KEY)

# βœ… Page configuration and header (set_page_config must be the first
# Streamlit call in the script).
st.set_page_config(page_title="AI Disease Detection Assistant", page_icon="🩺", layout="wide")
st.title("🩺 AI Disease Detection Chatbot")
st.write("Hello! I'm your AI assistant for disease-related queries. Ask me about symptoms, treatments, or general health advice.")

# βœ… Sidebar: let the user pick a background theme.
st.sidebar.header("βš™οΈ Settings")
chat_theme = st.sidebar.radio("Choose a theme:", ["Light", "Dark", "Blue", "Green"])

# βœ… Map each theme name to its page background color.
_THEME_COLORS = {
    "Light": "#ffffff",
    "Dark": "#1e1e1e",
    "Blue": "#e3f2fd",
    "Green": "#e8f5e9",
}
_background = _THEME_COLORS[chat_theme]

# βœ… Inject the theme CSS plus button and chat-bubble styling.
st.markdown(f"""
    <style>
        body {{ background-color: {_background}; color: black; }}
        .stButton>button {{ background-color: #4CAF50; color: white; }}
        .chat-bubble {{ background-color: #f1f1f1; border-radius: 10px; padding: 10px; }}
    </style>
""", unsafe_allow_html=True)

# βœ… Persist the chat transcript across Streamlit reruns.
st.session_state.setdefault('conversation_history', [])

# βœ… Function to generate AI response
def generate_chatbot_response(user_message: str) -> str:
    """Return the chatbot's reply to *user_message*.

    A small set of hard-coded questions is answered locally; everything
    else is forwarded to the Groq chat-completion API. API failures are
    returned as an error string instead of raised, so the UI never
    crashes on a bad request.
    """
    # Locally-served custom response — no API round trip needed.
    if "who created you" in user_message.lower():
        return "I was created by Abdel Basit. 😊"

    # Keep the assistant instruction in a system message and pass the
    # user's text as its own user message, instead of interpolating the
    # user text into one instruction f-string. This matches the chat-API
    # role model and reduces the prompt-injection surface.
    messages = [
        {
            "role": "system",
            "content": "You are a medical AI assistant. Provide a detailed, accurate medical response.",
        },
        {"role": "user", "content": user_message},
    ]

    try:
        chat_completion = client.chat.completions.create(
            messages=messages,
            model="llama3-8b-8192",
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Surface the failure in the chat transcript rather than crashing.
        return f"⚠️ Error: {str(e)}"

# βœ… User chat input
st.markdown("### πŸ’¬ Chat with me")
user_input = st.chat_input("Ask me a health-related question:")

if user_input:
    chatbot_response = generate_chatbot_response(user_input)
    st.session_state.conversation_history.append(("User: " + user_input, "Chatbot: " + chatbot_response))

# βœ… Display chat history
st.markdown("---")
st.markdown("### πŸ—¨οΈ Chat History")
for question, answer in st.session_state.conversation_history:
    # SECURITY: escape user/model text before interpolating it into raw
    # HTML rendered with unsafe_allow_html=True — otherwise a message
    # containing markup (e.g. <script>) would be injected into the page.
    st.write(f"<div class='chat-bubble'><b>{html.escape(question)}</b></div>", unsafe_allow_html=True)
    st.write(f"<div class='chat-bubble'>{html.escape(answer)}</div>", unsafe_allow_html=True)