import os
import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Hugging Face auth: the two env vars below are read by different HF client
# libraries, so both are populated from the same secret.
# os.getenv("key") returns None when the secret is unset, and os.environ
# rejects None values (TypeError) — only set the vars when a token exists.
_hf_token = os.getenv("key")
if _hf_token:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_token
    os.environ['HF_TOKEN'] = _hf_token
# Streamlit page chrome — st.set_page_config must be the first st.* call.
st.set_page_config(page_title="👨🏫 Multi-Mentor Chat", page_icon="🧠")

# --- Custom CSS for styling ---
# Placeholder: no CSS rules are defined yet; the injected payload is empty.
_custom_css = """
"""
st.markdown(_custom_css, unsafe_allow_html=True)
# --- Title and Mentor Labels ---
st.title("🧠 Multi-Topic Mentor")
# NOTE(review): the original st.markdown literal here was unterminated (a run
# of bare single-quote fragments — its HTML was stripped at some point) and
# did not parse.  Reconstructed as a single triple-quoted HTML badge row
# carrying the same six mentor labels in the same order.
st.markdown(
    """
    <div style="display: flex; flex-wrap: wrap; gap: 8px; margin-bottom: 1em;">
        <span>🟢 Python</span>
        <span>🟠 ML</span>
        <span>🔵 DL</span>
        <span>🟣 Stats</span>
        <span>🟡 Data_Analysis</span>
        <span>🔴 SQL &amp; PowerBI</span>
    </div>
    """,
    unsafe_allow_html=True,
)
# --- Mentor selectbox and chat flow ---
# NOTE(review): in the original, several st.markdown('...') wrapper calls were
# unterminated string literals (their HTML had been stripped) and the body of
# `if mentor_type:` had lost all indentation — neither parsed.  The empty
# wrappers injected nothing and are dropped; indentation is restored.
# "data_anaylasis" is also corrected to "data_analysis" (label and dict key
# are kept consistent inside this block).
mentor_type = st.selectbox(
    "Choose a mentor:",
    ["", "python", "machine_learning", "deep_learning", "stats",
     "data_analysis", "sql and powerbi"],
)

# Single place to configure each mentor's backing model:
# mentor -> (repo_id, provider).  Replaces the original six-branch if/elif
# chain that duplicated the HuggingFaceEndpoint construction.
_MENTOR_MODELS = {
    "python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
    "machine_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "deep_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "stats": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
    "data_analysis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
    "sql and powerbi": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
}

if mentor_type:
    st.subheader(f"🧠 {mentor_type.upper()} Mentor Chat")

    experience = st.slider("Your experience (in years):", 0, 20, 1)
    user_input = st.text_input("Ask your question:")
    output_container = st.empty()

    repo_id, provider = _MENTOR_MODELS[mentor_type]
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # ChatHuggingFace only needs the wrapped endpoint.  The original also
    # re-passed repo_id/provider/temperature/... and hard-coded
    # provider="nebius", which contradicted the "hyperbolic" endpoint used
    # for the "sql and powerbi" mentor.
    chat_model = ChatHuggingFace(llm=model)

    if st.button("Ask") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {mentor_type.upper()} mentor assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)
        with st.spinner("Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)
        # st.empty() holds a single element: two successive .markdown() calls
        # overwrite each other (the original lost the "You:" line).  Render
        # both lines with one call instead.
        output_container.markdown(
            f"**👤 You:** {user_input}\n\n**🧠 Mentor:** {response.content}"
        )

    if st.button("Clear Output"):
        output_container.empty()