import os
import streamlit as st
import torch
from groq import Groq
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
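
# Requirements (assumed): pip install streamlit torch groq transformers accelerate
# Launch with: streamlit run <this_file>.py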

# βœ… Ensure set_page_config() is the first Streamlit command
st.set_page_config(page_title="AI Study Assistant", page_icon="πŸ€–", layout="wide")

# Set up the Groq API key β€” read it from the environment rather than
# hardcoding a secret in source
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "your_groq_api_key_here")  # Replace placeholder or export GROQ_API_KEY

# Initialize the Groq client
client = Groq(api_key=GROQ_API_KEY)

# βœ… Ensure Accelerate is installed (needed for device_map="auto" below)
try:
    import accelerate  # noqa: F401
except ImportError:
    st.error("⚠️ The `accelerate` library is required. Install it with: `pip install accelerate`")
    st.stop()  # don't continue to the model load, which would fail anyway

# βœ… Initialize the Hugging Face DeepSeek R1 model
# Note: the full DeepSeek-R1 checkpoint is very large; loading it locally
# requires substantial GPU memory.
MODEL_NAME = "deepseek-ai/DeepSeek-R1"

@st.cache_resource  # cache across Streamlit reruns so the model loads only once
def load_hf_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None
    )
    return tokenizer, model

try:
    tokenizer, model = load_hf_model()

    def generate_response_hf(user_message):
        inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
        # max_new_tokens bounds only the generated text; max_length would also
        # count prompt tokens and could silently truncate the response
        outputs = model.generate(**inputs, max_new_tokens=200)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

except Exception as e:
    st.error(f"❌ Error loading DeepSeek-R1: {e}")

    def generate_response_hf(user_message):
        # Fallback so the rest of the app still works without the local model
        return "⚠️ Error: Model not loaded."

# Streamlit UI setup
st.title("πŸ“š Subject-specific AI Chatbot")
st.write("Hello! I'm your AI Study Assistant. You can ask me any questions related to your subjects, and I'll try to help.")

# Sidebar settings
st.sidebar.header("βš™οΈ Settings")
chat_model = st.sidebar.radio("Choose AI Model:", ["Groq API", "DeepSeek R1 (Hugging Face)"])

# Initialize session state for conversation
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = []

# Subjects list
subjects = ["Chemistry", "Computer", "English", "Islamiat", "Mathematics", "Physics", "Urdu"]

def generate_chatbot_response(user_message):
    related_subject = next((subject for subject in subjects if subject.lower() in user_message.lower()), None)

    # "kisne banaya" is Urdu for "who made (you)?"
    if "kisne banaya" in user_message.lower() or "who created you" in user_message.lower():
        return "I was created by Abdul Basit 😊"

    prompt = f"You are a helpful AI chatbot for studying {related_subject if related_subject else 'general knowledge'}. The user is asking: {user_message}. Provide a detailed, helpful response."
    
    if chat_model == "Groq API":
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            # "deepseek-chat" is DeepSeek's own API model id, not a Groq one;
            # Groq serves DeepSeek R1 as a distilled Llama model
            model="deepseek-r1-distill-llama-70b"
        )
        return chat_completion.choices[0].message.content
    else:
        return generate_response_hf(prompt)

# Chat input
st.markdown("### πŸ’¬ Chat with me")
user_input = st.chat_input("Ask me a subject-related question:")

if user_input:
    chatbot_response = generate_chatbot_response(user_input)
    st.session_state.conversation_history.append(("User: " + user_input, "Chatbot: " + chatbot_response))

# Display chat history
st.markdown("---")
st.markdown("### πŸ—¨οΈ Chat History")
for question, answer in st.session_state.conversation_history:
    # st.markdown is the documented way to render raw HTML
    st.markdown(f"<div class='chat-bubble'><b>{question}</b></div>", unsafe_allow_html=True)
    st.markdown(f"<div class='chat-bubble'>{answer}</div>", unsafe_allow_html=True)