"""Streamlit app: AI-Powered Mentor Assistant.

The user picks a mentor domain; the app builds a domain-specific Hugging Face
chat model via LangChain and answers the user's question in a mentor persona.
"""

import os

import streamlit as st
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Hugging Face credentials come from the "keys" environment variable.
# Guard against it being unset: `os.environ[...] = None` raises TypeError.
_hf_token = os.getenv("keys")
if _hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
    os.environ["HF_TOKEN"] = _hf_token

st.set_page_config(page_title="AI-Powered Mentor Assistant", page_icon="🧠")
st.title("🧠AI-Powered Mentor Assistant")

# Single source of truth: display label -> (endpoint repo_id, inference provider).
# The same labels drive the header columns, the selectbox options, and the
# model lookup — this fixes the original bug where the selectbox returned
# "🐍 Python" but the dispatch compared against "python"/"machine_learning"/...
# so no branch ever matched and a hard-coded fallback model was always used.
MENTOR_MODELS = {
    "🐍 Python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
    "🤖 Machine Learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "🧠 Deep Learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "📊 Statistics": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
    "📈 Data Analysis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
    "🗄️ SQL & Power BI": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
}

st.markdown("### 🎓 **Select Your Mentor:**")

# One header column per mentor domain (the original created the columns twice
# and re-imported streamlit mid-file; both duplications are removed here).
for col, label in zip(st.columns(len(MENTOR_MODELS)), MENTOR_MODELS):
    with col:
        st.markdown(f"#### {label}", unsafe_allow_html=True)

st.markdown("🚀 Select Your Mentor Domain:", unsafe_allow_html=True)

# Empty first option so nothing is selected until the user chooses a domain.
mentor_type = st.selectbox("", [""] + list(MENTOR_MODELS))

if mentor_type:
    st.subheader(f"🧠 {mentor_type.upper()} Mentor Chat")
    experience = st.slider("Your experience (in years):", 0, 20, 1)
    user_input = st.text_input("Ask your question:")
    output_container = st.empty()

    # Build the endpoint for the selected domain once, from the mapping.
    repo_id, provider = MENTOR_MODELS[mentor_type]
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # ChatHuggingFace only wraps an existing llm; the endpoint-level kwargs
    # (repo_id/provider/temperature/...) the original passed here belong on
    # HuggingFaceEndpoint and are dropped.
    chat_model = ChatHuggingFace(llm=model)

    if st.button("Ask") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {mentor_type.upper()} mentor "
                f"assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)
        with st.spinner("Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)
        output_container.markdown(f"👤 You: {user_input}")
        output_container.markdown(f"🧠 Mentor: {response.content}")

    if st.button("Clear Output"):
        output_container.empty()