sree4411 committed on
Commit
7586f19
·
verified ·
1 Parent(s): bfc965b

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +92 -51
src/streamlit_app.py CHANGED
@@ -2,70 +2,111 @@ import os
2
  import streamlit as st
3
  from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
4
  from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 
5
 
 
6
  os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv("key")
7
  os.environ['HF_TOKEN'] = os.getenv("key")
8
 
9
- st.set_page_config(page_title="👨‍🏫 Multi-Mentor Chat", page_icon="🧠")
10
- st.title("🧠 Multi-Topic Mentor")
 
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
- st.markdown("### Select your Mentor:")
14
- col1, col2, col3, col4, col5, col6 = st.columns(6)
15
- with col1: st.write("🟢 Python")
16
- with col2: st.write("🟠 ML")
17
- with col3: st.write("🔵 DL")
18
- with col4: st.write("🟣 Stats")
19
- with col5: st.write("🟡 Data_Anaylasis")
20
- with col6: st.write("🔴 sql and powerbi")
21
 
22
- mentor_type = st.selectbox("Choose a mentor:", ["", "python", "machine_learning", "deep_learning", "stats", "data_anaylasis", "sql and powerbi"])
 
 
23
 
24
- if mentor_type:
25
- st.subheader(f"🧠 {mentor_type.upper()} Mentor Chat")
26
- experience = st.slider("Your experience (in years):", 0, 20, 1)
27
- user_input = st.text_input("Ask your question:")
 
 
 
28
 
 
 
 
 
 
 
 
 
29
 
30
- output_container = st.empty()
 
 
 
 
 
 
 
31
 
32
- if mentor_type == "python":
33
- model = HuggingFaceEndpoint(repo_id="meta-llama/Llama-3.1-8B-Instruct", provider="nebius", temperature=0.5, max_new_tokens=150, task="conversational")
34
- elif mentor_type == "machine_learning":
35
- model = HuggingFaceEndpoint(repo_id="deepseek-ai/DeepSeek-R1", provider="nebius", temperature=0.5, max_new_tokens=150, task="conversational")
36
- elif mentor_type == "deep_learning":
37
- model = HuggingFaceEndpoint(repo_id="deepseek-ai/DeepSeek-R1", provider="nebius", temperature=0.5, max_new_tokens=150, task="conversational")
38
- elif mentor_type == "stats":
39
- model = HuggingFaceEndpoint(repo_id="meta-llama/Llama-3.2-1B-Instruct", provider="nebius", temperature=0.5, max_new_tokens=150, task="conversational")
40
- elif mentor_type == "data_anaylasis":
41
- model = HuggingFaceEndpoint(repo_id="meta-llama/Llama-3.3-70B-Instruct", provider="nebius", temperature=0.5, max_new_tokens=150, task="conversational")
42
- elif mentor_type == "sql and powerbi":
43
- model = HuggingFaceEndpoint(repo_id="meta-llama/Meta-Llama-3-70B-Instruct", provider="hyperbolic", temperature=0.5, max_new_tokens=150, task="conversational")
44
 
45
- chat_model = ChatHuggingFace(
46
- llm=model,
47
- repo_id=model.repo_id,
48
- provider="nebius",
49
- temperature=0.5,
50
- max_new_tokens=150,
51
- task="conversational"
52
- )
53
 
 
 
 
54
 
55
- if st.button("Ask") and user_input:
56
- prompt = ChatPromptTemplate.from_messages([
57
- SystemMessagePromptTemplate.from_template(
58
- f"You are a helpful and experienced {mentor_type.upper()} mentor assisting a learner with {experience} years of experience."
59
- ),
60
- HumanMessagePromptTemplate.from_template("{question}")
61
- ])
62
- formatted_prompt = prompt.format_messages(question=user_input)
63
 
64
- with st.spinner("Mentor is thinking..."):
65
- response = chat_model.invoke(formatted_prompt)
 
 
66
 
67
- output_container.markdown(f"👤 You: {user_input}")
68
- output_container.markdown(f"🧠 Mentor: {response.content}")
69
-
70
- if st.button("Clear Output"):
71
- output_container.empty()
 
 
 
 
 
 
 
"""Multi-mentor Streamlit chatbot.

One tab per mentor topic; each tab wires a topic-specific Hugging Face
chat model to a question box, keeps a per-mentor conversation history in
``st.session_state``, and offers the transcript as a download.
"""
import os
from io import StringIO

import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# --- API tokens -------------------------------------------------------------
# os.environ values must be str: assigning os.getenv("key") directly raises
# TypeError when the "key" secret is missing. Guard and warn instead of
# crashing the whole app at import time.
_hf_token = os.getenv("key")
if _hf_token:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_token
    os.environ['HF_TOKEN'] = _hf_token
else:
    st.warning("Secret 'key' is not set — Hugging Face model calls will fail.")

# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="🧠 Multi-Mentor Chat", page_icon="🧠")
st.title("👨‍🏫 Multi-Topic Mentor Chatbot")

# Mentor display name -> Hugging Face endpoint configuration.
MENTOR_MODELS = {
    "Python": {
        "repo_id": "meta-llama/Llama-3.1-8B-Instruct",
        "provider": "nebius"
    },
    "Machine Learning": {
        "repo_id": "deepseek-ai/DeepSeek-R1",
        "provider": "nebius"
    },
    "Deep Learning": {
        "repo_id": "deepseek-ai/DeepSeek-R1",
        "provider": "nebius"
    },
    "Stats": {
        "repo_id": "meta-llama/Llama-3.2-1B-Instruct",
        "provider": "nebius"
    },
    "Data Analysis": {
        "repo_id": "meta-llama/Llama-3.3-70B-Instruct",
        "provider": "nebius"
    },
    "SQL & Power BI": {
        "repo_id": "meta-llama/Meta-Llama-3-70B-Instruct",
        "provider": "hyperbolic"
    }
}


@st.cache_resource(show_spinner=False)
def get_chat_model(repo_id: str, provider: str) -> ChatHuggingFace:
    """Build and cache the chat model for one mentor.

    Caching avoids re-creating an endpoint client for every tab on every
    Streamlit rerun (previously both objects were rebuilt per tab even when
    no question was asked). Generation parameters live on the endpoint;
    ``ChatHuggingFace`` itself only needs the wrapped ``llm`` — the extra
    repo_id/provider/temperature kwargs previously passed to it belong to
    the endpoint, not the chat wrapper.
    """
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    return ChatHuggingFace(llm=llm)


# One tab per mentor.
tabs = st.tabs(list(MENTOR_MODELS.keys()))

# Per-mentor conversation history, kept across reruns.
if "conversations" not in st.session_state:
    st.session_state.conversations = {key: [] for key in MENTOR_MODELS}

for tab, (mentor_name, config) in zip(tabs, MENTOR_MODELS.items()):
    with tab:
        st.subheader(f"🧠 {mentor_name} Mentor")
        experience = st.slider(f"Your experience in {mentor_name} (years):", 0, 20, 1, key=mentor_name)
        question = st.text_input(f"Ask your {mentor_name} question:", key=f"q_{mentor_name}")
        output = st.empty()

        # Handle question and response. The model is only constructed (or
        # fetched from the resource cache) when the user actually asks.
        if st.button("Ask", key=f"ask_{mentor_name}") and question:
            chat_model = get_chat_model(config["repo_id"], config["provider"])
            prompt = ChatPromptTemplate.from_messages([
                SystemMessagePromptTemplate.from_template(
                    f"You are a helpful and experienced {mentor_name} mentor. "
                    f"The user has {experience} years of experience. "
                    "Answer the question appropriately."
                ),
                HumanMessagePromptTemplate.from_template("{question}"),
            ])
            formatted_prompt = prompt.format_messages(question=question)

            with st.spinner("Mentor is thinking..."):
                response = chat_model.invoke(formatted_prompt)

            answer = response.content
            # st.empty() holds exactly ONE element, so two successive
            # .markdown() calls would overwrite the "You" line — render both
            # lines in a single call.
            output.markdown(f"👤 You: {question}\n\n🧠 Mentor: {answer}")

            # Save conversation for the transcript download below.
            st.session_state.conversations[mentor_name].append(f"You: {question}")
            st.session_state.conversations[mentor_name].append(f"Mentor: {answer}")

        # Clear output and history for this mentor only.
        if st.button("Clear", key=f"clear_{mentor_name}"):
            output.empty()
            st.session_state.conversations[mentor_name] = []

        # Download conversation transcript as plain text.
        if st.session_state.conversations[mentor_name]:
            convo_text = "\n".join(st.session_state.conversations[mentor_name])
            buffer = StringIO(convo_text)
            st.download_button(
                label="⬇️ Download Conversation",
                data=buffer,
                file_name=f"{mentor_name.lower().replace(' ', '_')}_conversation.txt",
                mime="text/plain",
                key=f"download_{mentor_name}",
            )