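"""Streamlit app: a conversational AI Data Science tutor.

Offers a keyword-based dummy mode (no API key needed) and an OpenAI-backed mode
that keeps conversational context in a LangChain ConversationBufferMemory.
"""
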
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage
from langchain.memory import ConversationBufferMemory
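
# Page configuration and title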
st.set_page_config(page_title="AI Data Science Tutor", layout="wide")
st.title("🎓 AI Conversational Data Science Tutor")
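
# Sidebar: pick between the offline dummy tutor and the OpenAI-backed tutor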
st.sidebar.header("⚙️ Settings")

mode = st.sidebar.radio(
    "Choose Tutor Mode:",
    ("Dummy Tutor (No API Key)", "OpenAI Tutor (API Key Required)")
)

if mode == "OpenAI Tutor (API Key Required)":
    openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key:", type="password")
else:
    openai_api_key = None
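
# Persist conversation memory and display history across Streamlit reruns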
if "memory" not in st.session_state: |
|
|
st.session_state.memory = ConversationBufferMemory(return_messages=True) |
|
|
|
|
|
if "chat_history" not in st.session_state: |
|
|
st.session_state.chat_history = [] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
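
# Keyword-matched canned answers so the app can run without an API key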
def dummy_tutor_response(user_query):
    q = user_query.lower()
    if "regression" in q:
        return "📈 Regression is a supervised ML technique used to predict continuous values."
    elif "classification" in q:
        return "📊 Classification predicts categorical labels, e.g., spam vs not spam."
    elif "neural" in q or "deep learning" in q:
        return "🧠 Neural Networks consist of layers of neurons that learn patterns from data."
    elif "pca" in q:
        return "📉 PCA reduces dimensions while preserving variance."
    elif "accuracy" in q or "precision" in q or "recall" in q:
        return "✅ Accuracy = correct predictions / total. For imbalanced data, use precision, recall, or F1-score."
    elif "clustering" in q:
        return "📊 Clustering groups similar points without labels (unsupervised learning)."
    elif "overfitting" in q:
        return "⚠️ Overfitting means the model memorizes data instead of generalizing."
    else:
        return f"🤔 I didn't fully get that. Can you rephrase your Data Science question? (You asked: {user_query})"
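
# Route the question either to the dummy tutor or to the OpenAI-backed tutor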
def get_tutor_response(user_query):
    if mode == "Dummy Tutor (No API Key)":
        return dummy_tutor_response(user_query)

    if mode == "OpenAI Tutor (API Key Required)" and openai_api_key:
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            openai_api_key=openai_api_key,
            temperature=0.5
        )

        # Record the user's message, send the full conversation to the model,
        # then store the reply so later turns keep the context.
        st.session_state.memory.chat_memory.add_user_message(user_query)
        response = llm(st.session_state.memory.chat_memory.messages)
        st.session_state.memory.chat_memory.add_ai_message(response.content)
        return response.content

    return "⚠️ Please provide your OpenAI API key in the sidebar."
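
# Chat input: answer each new question and append both turns to the history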
user_query = st.chat_input("Ask me a Data Science question...")

if user_query:
    response = get_tutor_response(user_query)
    st.session_state.chat_history.append(("You", user_query))
    st.session_state.chat_history.append(("Tutor", response))
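
# Re-render the whole conversation on every Streamlit rerun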
for sender, msg in st.session_state.chat_history:
    if sender == "You":
        st.markdown(f"**👩‍💻 {sender}:** {msg}")
    else:
        st.markdown(f"**🤖 {sender}:** {msg}")