sree4411 committed on
Commit
9d311ec
Β·
verified Β·
1 Parent(s): 0720394

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +68 -2
src/streamlit_app.py CHANGED
@@ -1,5 +1,71 @@
1
 
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
 
"""Multi-topic mentor chat: a Streamlit front-end over Hugging Face chat models.

The user picks a mentor topic, a model is selected per topic, and a single
question/answer exchange is rendered via LangChain prompt templates.
"""
import os

import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# Read the API token once and fail fast with a readable message.  The original
# `os.environ[...] = os.getenv("key")` raised TypeError when the "key" secret
# was missing, because os.environ values must be strings, not None.
_hf_token = os.getenv("key")
if not _hf_token:
    st.error("Missing Hugging Face API token: set the 'key' environment variable / Space secret.")
    st.stop()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
os.environ["HF_TOKEN"] = _hf_token

st.set_page_config(page_title="👨‍🏫 Multi-Mentor Chat", page_icon="🧠")
st.title("🧠 Multi-Topic Mentor")

st.markdown("### Select your Mentor:")
col1, col2, col3, col4, col5, col6 = st.columns(6)
with col1:
    st.write("🟢 Python")
with col2:
    st.write("🟠 ML")
with col3:
    st.write("🔵 DL")
with col4:
    st.write("🟣 Stats")
with col5:
    st.write("🟡 Data Analysis")   # typo "Data_Anaylasis" fixed
with col6:
    st.write("🔴 sql and powerbi")

# (repo_id, inference provider) per mentor topic.  A lookup table replaces the
# original if/elif chain of six near-identical HuggingFaceEndpoint(...) calls.
# NOTE(review): "sql and powerbi" used provider "hyperbolic" in the original;
# preserved here — confirm that provider is intended.
_MENTOR_MODELS = {
    "python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
    "machine_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "deep_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "stats": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
    "data_analysis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
    "sql and powerbi": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
}

mentor_type = st.selectbox("Choose a mentor:", [""] + list(_MENTOR_MODELS))

if mentor_type:
    st.subheader(f"🧠 {mentor_type.upper()} Mentor Chat")
    experience = st.slider("Your experience (in years):", 0, 20, 1)
    user_input = st.text_input("Ask your question:")
    output_container = st.empty()

    repo_id, provider = _MENTOR_MODELS[mentor_type]
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # ChatHuggingFace only wraps the endpoint; the original also passed
    # repo_id/temperature/max_new_tokens/task (not ChatHuggingFace parameters)
    # and a hard-coded provider="nebius" that contradicted the hyperbolic branch.
    chat_model = ChatHuggingFace(llm=model)

    if st.button("Ask") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {mentor_type.upper()} mentor assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)

        with st.spinner("Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)

        # Render both lines in ONE call: st.empty() holds a single element, so
        # the original's second .markdown() overwrote the "You:" line.
        output_container.markdown(
            f"👤 You: {user_input}\n\n🧠 Mentor: {response.content}"
        )

    if st.button("Clear Output"):
        output_container.empty()
71