"""MentorVerse: a Streamlit chat UI that routes a learner's question to a
HuggingFace-hosted LLM "mentor" chosen by specialization."""

import os

import streamlit as st
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# HuggingFace auth. The hub client reads HUGGINGFACEHUB_API_TOKEN — the
# original set HUGGINGFACEHUB_API_KEY, which is silently ignored. Guard
# against a missing 'hf' env var: os.environ[...] = None raises TypeError,
# so fail with a readable message instead.
_hf_token = os.getenv("hf")
if _hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
    os.environ["HF_TOKEN"] = _hf_token
else:
    st.error("Environment variable 'hf' (HuggingFace token) is not set.")

st.set_page_config(
    page_title="🧠 MentorVerse: Ask. Learn. Grow.",
    page_icon="🎓",
    layout="wide",
)

# NOTE(review): the original passed custom HTML to st.markdown(...,
# unsafe_allow_html=True), but the tags were stripped in this copy and the
# markup is unrecoverable — rendered as plain markdown instead.
st.markdown("# 🧠 MentorVerse: Ask. Learn. Grow.")
st.markdown("### 👇 Choose your mentor specialization:")

# Mentor key -> display label (used for the tiles, the selectbox, headings).
MENTORS = {
    "python": "🐍 Python",
    "ml": "🤖 ML",
    "dl": "🧠 DL",
    "advance statistics": "📊 Stats",
    "sql": "🗃️ SQL",
    "power bi": "📈 Power BI",
}

# Mentor key -> HuggingFace model repo that answers for that specialization.
MODEL_MAP = {
    "python": "meta-llama/Llama-3.1-8B-Instruct",
    "ml": "deepseek-ai/DeepSeek-R1",
    "dl": "google/gemma-7b-it",
    "advance statistics": "mistralai/Mistral-7B-Instruct-v0.1",
    "sql": "google/gemma-7b-it",
    "power bi": "tiiuae/falcon-7b-instruct",
}

# One tile per mentor, laid out in a single row.
cols = st.columns(len(MENTORS))
for col, label in zip(cols, MENTORS.values()):
    with col:
        st.markdown(f"**{label}**")

mentor_type = st.selectbox(
    "🎓 Who would you like to talk to?", [""] + list(MENTORS)
)


@st.cache_resource
def get_chat_model(repo_id: str) -> ChatHuggingFace:
    """Build — and cache across Streamlit reruns — the chat model for repo_id."""
    endpoint = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider="nebius",
        temperature=0.5,
        max_new_tokens=300,
        task="conversational",
    )
    # ChatHuggingFace only wraps the endpoint; the original re-passed
    # repo_id/provider/temperature/etc., which it does not accept.
    return ChatHuggingFace(llm=endpoint)


if mentor_type:
    st.markdown(f"## 🧠 Talking to your {MENTORS[mentor_type]} Mentor")
    experience = st.slider("📊 Your Experience (Years)", 0, 6, 1)
    user_input = st.text_input("💬 Ask your question below:")
    output_container = st.empty()

    chat_model = get_chat_model(MODEL_MAP[mentor_type])

    if st.button("🚀 Ask Mentor") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a senior {mentor_type.upper()} mentor helping a learner with {experience} years of experience. Respond with clarity and actionable insights."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)

        with st.spinner("🧠 Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)

        # Render into a container inside the placeholder. The original made
        # four sequential calls on the st.empty() placeholder, each of which
        # REPLACES the previous element — so only the final st.success() was
        # ever visible. A container shows all four while keeping "Clear
        # Chat" a single .empty() call.
        with output_container.container():
            st.markdown("#### 👤 You:")
            st.markdown(f"`{user_input}`")
            st.markdown("#### 🧠 Mentor:")
            st.success(response.content)

    if st.button("❌ Clear Chat"):
        output_container.empty()