"""AI Mentor Hub — a Streamlit chat app that routes questions to a
topic-specific Hugging Face chat model (via langchain-huggingface)."""

import os

import streamlit as st
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Page config must be the first Streamlit command in the script.
st.set_page_config(page_title="🧠 AI Mentor Hub", page_icon="🎓")

# Load the Hugging Face token from the "key" environment variable.
# FIX: the original did `os.environ[...] = os.getenv("key")`, which raises
# TypeError (environ values must be str) whenever "key" is unset — fail
# fast with a readable message instead.
_hf_token = os.getenv("key")
if not _hf_token:
    st.error("❗ Hugging Face token missing: set the 'key' environment variable.")
    st.stop()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
os.environ["HF_TOKEN"] = _hf_token

st.title("🎓 AI Mentor Hub - Learn Smarter, Faster!")
st.markdown("### 🔍 Select your mentor and ask any question:")

# Quick visual strip of the available mentor topics.
# FIX: "Data_Anaylasis" typo and "sql and powerbi" casing normalized to
# match the `mentor_labels` dict below.
# NOTE(review): the source emoji were mojibake (UTF-8 decoded as a Thai
# codepage); restored from the topic names — confirm against the original.
_topic_captions = [
    "🐍 Python",
    "🤖 ML",
    "🧠 DL",
    "📊 Stats",
    "🧾 Data Analysis",
    "🗃️ SQL & Power BI",
]
for _col, _caption in zip(st.columns(6), _topic_captions):
    with _col:
        st.write(_caption)

# Human-readable label for each mentor topic key.
mentor_labels = {
    "python": "🐍 Python",
    "machine_learning": "🤖 Machine Learning",
    "deep_learning": "🧠 Deep Learning",
    "stats": "📊 Statistics",
    "data_analysis": "🧾 Data Analysis",
    "sql_powerbi": "🗃️ SQL & Power BI",
}

# Model routing: which Hugging Face repo/provider serves each topic.
mentor_configs = {
    "python": {"repo_id": "meta-llama/Llama-3.1-8B-Instruct", "provider": "nebius"},
    "machine_learning": {"repo_id": "deepseek-ai/DeepSeek-R1", "provider": "nebius"},
    "deep_learning": {"repo_id": "deepseek-ai/DeepSeek-R1", "provider": "nebius"},
    "stats": {"repo_id": "meta-llama/Llama-3.2-1B-Instruct", "provider": "nebius"},
    "data_analysis": {"repo_id": "meta-llama/Llama-3.3-70B-Instruct", "provider": "nebius"},
    "sql_powerbi": {"repo_id": "meta-llama/Meta-Llama-3-70B-Instruct", "provider": "hyperbolic"},
}

# Topic picker; the leading "" entry means "nothing selected yet".
mentor_choice = st.selectbox("Choose a mentor topic:", [""] + list(mentor_labels.keys()))

# Conversation transcript, persisted across Streamlit reruns.
if "conversation" not in st.session_state:
    st.session_state.conversation = []

if mentor_choice:
    label = mentor_labels[mentor_choice]
    st.subheader(f"{label} Mentor Chat")

    # Inputs
    experience = st.slider("📅 Your experience (years):", 0, 20, 1)
    question = st.text_input("💬 Ask your question:")

    # Model setup: sampling parameters live on the endpoint.
    config = mentor_configs[mentor_choice]
    model = HuggingFaceEndpoint(
        repo_id=config["repo_id"],
        provider=config["provider"],
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # FIX: ChatHuggingFace only wraps the endpoint; the repo_id / provider /
    # temperature / max_new_tokens / task kwargs the original also passed
    # here are not ChatHuggingFace fields and fail pydantic validation.
    chat_model = ChatHuggingFace(llm=model)

    output_box = st.empty()

    if st.button("🧠 Get Answer"):
        if not question.strip():
            st.warning("❗ Please enter a question.")
        else:
            prompt = ChatPromptTemplate.from_messages([
                SystemMessagePromptTemplate.from_template(
                    f"You are a helpful and expert "
                    f"{mentor_choice.replace('_', ' ').title()} mentor. "
                    f"The user has {experience} years of experience. Answer clearly."
                ),
                HumanMessagePromptTemplate.from_template("{question}"),
            ])
            messages = prompt.format_messages(question=question)
            with st.spinner("Thinking..."):
                response = chat_model.invoke(messages)
            answer = response.content
            # FIX: st.empty() holds a single element, so two successive
            # .markdown() calls overwrite each other — render both lines
            # with one call so the user's question stays visible.
            output_box.markdown(f"👤 **You:** {question}\n\n🧠 **Mentor:** {answer}")
            st.session_state.conversation.append(f"You: {question}")
            st.session_state.conversation.append(f"Mentor: {answer}")

    if st.button("🗑️ Clear Chat"):
        output_box.empty()
        st.session_state.conversation = []

    if st.session_state.conversation:
        convo_text = "\n".join(st.session_state.conversation)
        st.download_button(
            "⬇️ Download Conversation",
            data=convo_text,
            file_name=f"{mentor_choice}_chat.txt",
            mime="text/plain",
        )