sree4411 committed on
Commit
db324f2
·
verified ·
1 Parent(s): 29886bc

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +87 -91
src/streamlit_app.py CHANGED
@@ -2,112 +2,108 @@ import os
2
  import streamlit as st
3
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
4
  from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
5
- from io import StringIO
6
 
7
- # Set API tokens
8
  os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv("key")
9
  os.environ['HF_TOKEN'] = os.getenv("key")
10
 
11
- # Page setup
12
- st.set_page_config(page_title="🧠 Multi-Mentor Chat", page_icon="🧠")
13
- st.title("πŸ‘¨β€πŸ« Multi-Topic Mentor Chatbot")
14
-
15
- st.markdown(":red[Select a topic area to get personalized mentoring:]")
16
-
17
- # Define mentors and models
18
- MENTOR_MODELS = {
19
- "Python": {
20
- "repo_id": "meta-llama/Llama-3.1-8B-Instruct",
21
- "provider": "nebius"
22
- },
23
- "Machine Learning": {
24
- "repo_id": "deepseek-ai/DeepSeek-R1",
25
- "provider": "nebius"
26
- },
27
- "Deep Learning": {
28
- "repo_id": "deepseek-ai/DeepSeek-R1",
29
- "provider": "nebius"
30
- },
31
- "Stats": {
32
- "repo_id": "meta-llama/Llama-3.2-1B-Instruct",
33
- "provider": "nebius"
34
- },
35
- "Data Analysis": {
36
- "repo_id": "meta-llama/Llama-3.3-70B-Instruct",
37
- "provider": "nebius"
38
- },
39
- "SQL & Power BI": {
40
- "repo_id": "meta-llama/Meta-Llama-3-70B-Instruct",
41
- "provider": "hyperbolic"
42
- }
43
  }
44
 
45
- # Tabs for each mentor
46
- tabs = st.tabs(list(MENTOR_MODELS.keys()))
47
-
48
- # Initialize conversation storage
49
- if "conversations" not in st.session_state:
50
- st.session_state.conversations = {key: [] for key in MENTOR_MODELS.keys()}
51
-
52
- # Iterate through each mentor tab
53
- for tab, (mentor_name, config) in zip(tabs, MENTOR_MODELS.items()):
54
- with tab:
55
- st.subheader(f"🧠 {mentor_name} Mentor")
56
- experience = st.slider(f"Your experience in {mentor_name} (years):", 0, 20, 1, key=mentor_name)
57
- question = st.text_input(f"Ask your {mentor_name} question:", key=f"q_{mentor_name}")
58
- output = st.empty()
59
-
60
- # Load model
61
- model = HuggingFaceEndpoint(
62
- repo_id=config["repo_id"],
63
- provider=config["provider"],
64
- temperature=0.5,
65
- max_new_tokens=150,
66
- task="conversational"
67
- )
68
-
69
- chat_model = ChatHuggingFace(
70
- llm=model,
71
- repo_id=config["repo_id"],
72
- provider=config["provider"],
73
- temperature=0.5,
74
- max_new_tokens=150,
75
- task="conversational"
76
- )
77
-
78
- # Handle question and response
79
- if st.button("Ask", key=f"ask_{mentor_name}") and question:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  prompt = ChatPromptTemplate.from_messages([
81
  SystemMessagePromptTemplate.from_template(
82
- f"You are a helpful and experienced {mentor_name} mentor. The user has {experience} years of experience. Answer the question appropriately."
83
  ),
84
  HumanMessagePromptTemplate.from_template("{question}")
85
  ])
86
- formatted_prompt = prompt.format_messages(question=question)
87
 
88
- with st.spinner("Mentor is thinking..."):
89
  response = chat_model.invoke(formatted_prompt)
90
 
91
- answer = response.content
92
- output.markdown(f"πŸ‘€ You: {question}")
93
- output.markdown(f"🧠 Mentor: {answer}")
94
-
95
- # Save conversation
96
- st.session_state.conversations[mentor_name].append(f"You: {question}")
97
- st.session_state.conversations[mentor_name].append(f"Mentor: {answer}")
98
 
99
- # Clear output
100
- if st.button("Clear", key=f"clear_{mentor_name}"):
101
- output.empty()
102
- st.session_state.conversations[mentor_name] = []
 
 
103
 
104
- # Download conversation
105
- if st.session_state.conversations[mentor_name]:
106
- convo_text = "\n".join(st.session_state.conversations[mentor_name])
 
 
 
 
 
107
  st.download_button(
108
- label="⬇️ Download Conversation",
109
- data=convo_text, # ← Fixed: Use string directly
110
- file_name=f"{mentor_name.lower().replace(' ', '_')}_conversation.txt",
111
- mime="text/plain",
112
- key=f"download_{mentor_name}"
113
  )
 
2
import os

import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# --- API key setup ---------------------------------------------------------
# os.getenv("key") returns None when the secret is missing, and assigning
# None to os.environ raises TypeError — guard so the app starts either way.
_hf_key = os.getenv("key")
if _hf_key:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_key
    os.environ['HF_TOKEN'] = _hf_key
else:
    st.warning("Hugging Face API key ('key') is not set; model calls will fail.")

# --- Streamlit page config -------------------------------------------------
st.set_page_config(page_title="🧠 Multi-Mentor Chat", page_icon="πŸŽ“", layout="wide")
st.title("πŸŽ“ Multi-Topic Mentor Assistant")
st.markdown("Get expert help in different data & tech domains. Choose a mentor below to get started:")

# Mentor registry: key -> (display label, HF repo id, inference provider)
MENTORS = {
    "python": ("🐍 Python", "meta-llama/Llama-3.1-8B-Instruct", "nebius"),
    "machine_learning": ("πŸ€– ML", "deepseek-ai/DeepSeek-R1", "nebius"),
    "deep_learning": ("🧠 DL", "deepseek-ai/DeepSeek-R1", "nebius"),
    "stats": ("πŸ“Š Stats", "meta-llama/Llama-3.2-1B-Instruct", "nebius"),
    "data_analysis": ("πŸ“ˆ Data Analysis", "meta-llama/Llama-3.3-70B-Instruct", "nebius"),
    "sql_powerbi": ("πŸ—„οΈ SQL + Power BI", "meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
}


@st.cache_resource(show_spinner=False)
def _load_chat_model(repo_id: str, provider: str) -> ChatHuggingFace:
    """Build and cache the chat model for one mentor.

    st.cache_resource keeps one client per (repo_id, provider) pair so the
    endpoint is not re-created on every Streamlit rerun.
    """
    endpoint = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=200,
        task="conversational",
    )
    return ChatHuggingFace(
        llm=endpoint,
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=200,
        task="conversational",
    )


# --- Mentor selection ------------------------------------------------------
st.markdown("### πŸ§‘β€πŸ« Choose a Mentor Domain")
mentor_keys = list(MENTORS)
cols = st.columns(len(MENTORS))
selected = None

for key, col in zip(mentor_keys, cols):
    with col:
        if st.button(MENTORS[key][0]):
            selected = key
            st.session_state["selected_mentor"] = key

# A button press only lasts one rerun; fall back to the remembered choice.
if "selected_mentor" in st.session_state:
    selected = st.session_state["selected_mentor"]

if selected:
    mentor_name, repo_id, provider = MENTORS[selected]
    st.subheader(f"{mentor_name} Mentor πŸ’¬")

    experience = st.slider("πŸ“… Your Experience (Years):", 0, 20, 1)
    user_input = st.text_input("πŸ“ Ask your question:")
    output_container = st.container()

    chat_model = _load_chat_model(repo_id, provider)

    # Keep one transcript per mentor: a single shared "history" key would
    # display (and download) another mentor's conversation after switching.
    histories = st.session_state.setdefault("histories", {})
    history = histories.setdefault(selected, [])

    if st.button("🧠 Ask Mentor") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {mentor_name} mentor assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)

        with st.spinner("Thinking..."):
            response = chat_model.invoke(formatted_prompt)

        history.append(f"πŸ‘€ You: {user_input}")
        history.append(f"🧠 Mentor: {response.content}")

    # --- Conversation display ----------------------------------------------
    if history:
        with output_container:
            st.markdown("### πŸ—¨οΈ Conversation")
            for msg in history:
                st.markdown(msg)

    # --- Clear / download controls ------------------------------------------
    col_clear, col_download = st.columns(2)
    with col_clear:
        if st.button("🧹 Clear Conversation"):
            # clear() mutates in place so the local `history` list (and the
            # download check below) see the emptied transcript immediately.
            history.clear()
            output_container.empty()
    with col_download:
        if history:
            st.download_button(
                label="⬇️ Download Chat",
                data="\n".join(history),
                file_name=f"{selected}_mentor_chat.txt",
                mime="text/plain",
            )