# Subject-specific AI study chatbot built with Streamlit and the Groq API.
import os
import streamlit as st
from groq import Groq
# --- Configuration -----------------------------------------------------------
# SECURITY: the API key must come from the environment, never from source.
# A previous revision hardcoded a Groq key on this line; that key is exposed
# in version control and should be revoked. Set GROQ_API_KEY before launching.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    # Fail loudly in the UI rather than crashing later on the first API call.
    st.error("GROQ_API_KEY environment variable is not set. Configure it and restart the app.")
    st.stop()

# Groq client used for all chat-completion requests below.
client = Groq(api_key=GROQ_API_KEY)

# --- Streamlit UI setup ------------------------------------------------------
st.title("Subject-specific AI Chatbot")
st.write("Hello! I'm your AI Study Assistant. You can ask me any questions related to your subjects, and I'll try to help.")

# Conversation history must live in session state so it survives Streamlit's
# top-to-bottom rerun on every user interaction.
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = []

# Closed set of subjects the chatbot is meant to answer questions about.
subjects = ["Chemistry", "Computer", "English", "Islamiat", "Mathematics", "Physics", "Urdu"]
# Function to generate a chatbot response for subject-specific user input
def generate_chatbot_response(user_message):
    """Generate a chatbot reply for *user_message* via the Groq API.

    The prompt is steered toward one of the known ``subjects`` when the
    message mentions it; "who created you" questions get a canned answer
    without any API call.

    Args:
        user_message: Raw text the user typed into the chat input.

    Returns:
        The assistant's reply as a plain string (a friendly error message
        if the Groq API call fails).
    """
    # Lowercase once instead of once per subject in the loop below.
    lowered = user_message.lower()

    # Canned response for "who created you" questions — needs no API call,
    # so handle it before scanning for subjects.
    if "kisne banaya" in lowered or "who created you" in lowered:
        return "Mujhe Abdel Basit ne banaya hai. 😊"

    # First known subject mentioned in the message, or None.
    related_subject = next((s for s in subjects if s.lower() in lowered), None)

    if related_subject:
        prompt = f"You are a helpful AI chatbot for studying {related_subject}. The user is asking: {user_message}. Provide a detailed, helpful response related to {related_subject}."
    else:
        prompt = f"You are a helpful AI chatbot. The user is asking: {user_message}. If the question is not related to any of the specified subjects (Chemistry, Computer, English, Islamiat, Mathematics, Physics, Urdu), politely let them know."

    # Guard the network call: surface API failures as a chat message instead
    # of crashing the whole Streamlit app with a traceback.
    try:
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama3-8b-8192",  # You can replace with the appropriate model name
        )
    except Exception as exc:  # boundary handler: report, don't crash the UI
        return f"Sorry, I couldn't reach the language model right now ({exc})."

    return chat_completion.choices[0].message.content
# --- Chat interaction --------------------------------------------------------
# st.chat_input renders the prompt box pinned to the bottom of the page;
# it returns None until the user submits something.
if user_input := st.chat_input("Ask me a subject-related question:"):
    chatbot_response = generate_chatbot_response(user_input)
    # Record the new (question, answer) pair so it survives the rerun.
    st.session_state.conversation_history.append(
        ("User: " + user_input, "Chatbot: " + chatbot_response)
    )

# Render the full conversation so far, oldest exchange first.
for question, answer in st.session_state.conversation_history:
    st.write(f"**{question}**")
    st.write(answer)