DasariHarshitha committed on
Commit
fc65e68
·
verified ·
1 Parent(s): a0f9a69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -94
app.py CHANGED
@@ -1,100 +1,42 @@
1
  import streamlit as st
2
  from langchain_openai import ChatOpenAI
3
- from langchain.schema import AIMessage, HumanMessage
4
  from langchain.memory import ConversationBufferMemory
5
 
6
# --- Page setup ---
st.set_page_config(page_title="AI Data Science Tutor", layout="wide")
st.title("πŸ“Š AI Conversational Data Science Tutor")

# --- Sidebar controls ---
st.sidebar.header("βš™οΈ Settings")

mode = st.sidebar.radio(
    "Choose Tutor Mode:",
    ("Dummy Tutor (No API Key)", "OpenAI Tutor (API Key Required)"),
)

# Only prompt for a key when the OpenAI-backed tutor is selected;
# the dummy tutor runs entirely offline.
openai_api_key = (
    st.sidebar.text_input("Enter your OpenAI API Key:", type="password")
    if mode == "OpenAI Tutor (API Key Required)"
    else None
)

# --- Conversation state (must survive Streamlit reruns) ---
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferMemory(return_messages=True)

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
36
# ---------------------------
# Dummy Tutor Response Logic
# ---------------------------
def dummy_tutor_response(user_query):
    """Return a canned answer for common Data Science topics.

    The lower-cased query is matched against keyword groups in a fixed
    order; the first group with a hit wins. Unrecognised questions get
    a fallback that echoes the original query back to the user.
    """
    q = user_query.lower()
    # (keywords, canned answer) pairs — order matters: earlier entries win.
    canned_answers = (
        (("regression",),
         "πŸ“Š Regression is a supervised ML technique used to predict continuous values."),
        (("classification",),
         "πŸ” Classification predicts categorical labels, e.g., spam vs not spam."),
        (("neural", "deep learning"),
         "🧠 Neural Networks consist of layers of neurons that learn patterns from data."),
        (("pca",),
         "πŸ“‰ PCA reduces dimensions while preserving variance."),
        (("accuracy", "precision", "recall"),
         "βœ… Accuracy = correct predictions / total. For imbalanced data, use precision, recall, or F1-score."),
        (("clustering",),
         "πŸ“Œ Clustering groups similar points without labels (unsupervised learning)."),
        (("overfitting",),
         "⚠️ Overfitting means the model memorizes data instead of generalizing."),
    )
    for keywords, answer in canned_answers:
        if any(word in q for word in keywords):
            return answer
    return f"πŸ€” I didn’t fully get that. Can you rephrase your Data Science question? (You asked: {user_query})"
57
-
58
# ---------------------------
# Get AI Tutor Response
# ---------------------------
def get_tutor_response(user_query):
    """Route the question to the tutor backend selected in the sidebar.

    Dummy mode answers offline; OpenAI mode requires a key and keeps the
    full exchange in the session-scoped ConversationBufferMemory so the
    model sees prior turns. Without a key, a warning string is returned.
    """
    if mode == "Dummy Tutor (No API Key)":
        return dummy_tutor_response(user_query)

    if mode == "OpenAI Tutor (API Key Required)" and openai_api_key:
        chat_model = ChatOpenAI(
            model="gpt-4o-mini",  # You can also try "gpt-4o" or "gpt-4"
            openai_api_key=openai_api_key,
            temperature=0.5
        )

        # Record the question, send the whole transcript, record the answer.
        history = st.session_state.memory.chat_memory
        history.add_user_message(user_query)
        reply = chat_model(history.messages)
        history.add_ai_message(reply.content)
        return reply.content

    return "⚠️ Please provide your OpenAI API key in the sidebar."
84
-
85
# ---------------------------
# Chat UI
# ---------------------------
user_query = st.chat_input("Ask me a Data Science question...")

if user_query:
    # Answer first, then store both sides of the exchange in order.
    st.session_state.chat_history.extend(
        [("You", user_query), ("Tutor", get_tutor_response(user_query))]
    )

# Re-render the whole transcript on every rerun.
for sender, msg in st.session_state.chat_history:
    rendered = (
        f"**πŸ‘©β€πŸ’» {sender}:** {msg}"
        if sender == "You"
        else f"**πŸ€– {sender}:** {msg}"
    )
    st.markdown(rendered)
 
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Sidebar: an OpenAI key is required before the tutor can answer.
st.sidebar.title("βš™οΈ Settings")
api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password")

st.title("πŸ€– AI Conversational Data Science Tutor")

if api_key:
    # The model object is cheap to rebuild on each rerun; only the
    # conversation state needs to persist in session_state.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.5,
        openai_api_key=api_key
    )

    # LLM-side memory: full prompt/response transcript fed to the chain.
    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferMemory(return_messages=True)

    # UI-side transcript: (role, text) pairs holding the user's raw
    # question, so reruns can redraw the chat without the instruction
    # prefix that gets injected into the chain input below.
    if "transcript" not in st.session_state:
        st.session_state.transcript = []

    # Build the chain
    conversation = ConversationChain(
        llm=llm,
        memory=st.session_state.memory,
        verbose=False
    )

    # BUG FIX: previously only the newest exchange was drawn, so every
    # Streamlit rerun wiped the visible history even though memory kept
    # it. Replay the stored transcript before handling new input.
    for role, text in st.session_state.transcript:
        with st.chat_message(role):
            st.write(text)

    # User input
    user_input = st.chat_input("Ask a Data Science question...")
    if user_input:
        with st.chat_message("user"):
            st.write(user_input)

        with st.chat_message("assistant"):
            response = conversation.predict(input=f"You are a Data Science Tutor. Only answer Data Science questions. User asked: {user_input}")
            st.write(response)

        # Persist this exchange so the next rerun can re-render it.
        st.session_state.transcript.append(("user", user_input))
        st.session_state.transcript.append(("assistant", response))
else:
    st.warning("πŸ”‘ Please enter your OpenAI API key in the sidebar to start.")