|
|
import os |
|
|
import streamlit as st |
|
|
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate |
|
|
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace |
|
|
|
|
|
|
|
|
# Forward the Hugging Face API token (stored under the env var "key") to the
# names the langchain/huggingface clients actually read.
# NOTE: os.environ rejects non-string values, so assigning os.getenv("key")
# directly would raise TypeError whenever "key" is unset — guard first.
_hf_token = os.getenv("key")
if _hf_token:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_token
    os.environ['HF_TOKEN'] = _hf_token
|
|
|
|
|
|
|
|
# Page chrome: title bar, heading, and a row of topic badges.
st.set_page_config(page_title="π§ AI Mentor Hub", page_icon="π")
st.title("π AI Mentor Hub - Learn Smarter, Faster!")

st.markdown("### π Select your mentor and ask any question:")

# One column per topic. Badge text is kept consistent with the mentor_labels
# spellings used elsewhere in the app (fixes the "Data_Anaylasis" typo and the
# lowercase "sql and powerbi" label).
_topic_badges = [
    "π Python",
    "π€ ML",
    "π§ DL",
    "π Stats",
    "π§Ύ Data Analysis",
    "ποΈ SQL & Power BI",
]
for _col, _badge in zip(st.columns(len(_topic_badges)), _topic_badges):
    with _col:
        st.write(_badge)
|
|
|
|
|
|
|
|
# Human-readable label for each mentor topic key.
# Insertion order matters: it drives the option order in the selectbox below.
mentor_labels = dict([
    ("python", "π Python"),
    ("machine_learning", "π€ Machine Learning"),
    ("deep_learning", "π§ Deep Learning"),
    ("stats", "π Statistics"),
    ("data_analysis", "π§Ύ Data Analysis"),
    ("sql_powerbi", "ποΈ SQL & Power BI"),
])
|
|
|
|
|
# Backing model per topic as (HF repo id, inference provider) pairs,
# expanded into the {"repo_id": ..., "provider": ...} shape the endpoint
# construction below expects.
_MODEL_CHOICES = {
    "python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
    "machine_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "deep_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
    "stats": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
    "data_analysis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
    "sql_powerbi": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
}
mentor_configs = {
    topic: {"repo_id": repo, "provider": prov}
    for topic, (repo, prov) in _MODEL_CHOICES.items()
}
|
|
|
|
|
|
|
|
# Topic picker; the leading empty string is the "nothing selected" sentinel
# that keeps the chat UI hidden until the user picks a mentor.
mentor_choice = st.selectbox("Choose a mentor topic:", [""] + list(mentor_labels.keys()))

# Conversation history survives Streamlit reruns via session_state; create it
# once if absent, leave it untouched otherwise.
st.session_state.setdefault("conversation", [])
|
|
|
|
|
if mentor_choice:
    # Main chat UI — only shown once a topic has been selected.
    label = mentor_labels[mentor_choice]
    st.subheader(f"{label} Mentor Chat")

    # User inputs: experience level is folded into the system prompt;
    # question becomes the human turn.
    # (The slider label was a string literal broken across two lines —
    # a syntax error — now rejoined onto one line.)
    experience = st.slider("π Your experience (years):", 0, 20, 1)
    question = st.text_input("π¬ Ask your question:")

    # Resolve the backing model for the chosen topic.
    config = mentor_configs[mentor_choice]
    repo_id = config["repo_id"]
    provider = config["provider"]

    # The endpoint owns all generation parameters (repo, provider, sampling,
    # task); they are configured exactly once, here.
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational"
    )

    # ChatHuggingFace merely wraps the endpoint with a chat interface.
    # Passing repo_id/provider/temperature/max_new_tokens/task again here was
    # redundant (and not part of ChatHuggingFace's contract) — only llm= is needed.
    chat_model = ChatHuggingFace(llm=model)

    output_box = st.empty()

    if st.button("π§ Get Answer"):
        if not question.strip():
            st.warning("β Please enter a question.")
        else:
            # Two-message prompt: system persona + the user's question.
            prompt = ChatPromptTemplate.from_messages([
                SystemMessagePromptTemplate.from_template(
                    f"You are a helpful and expert {mentor_choice.replace('_', ' ').title()} mentor. The user has {experience} years of experience. Answer clearly."
                ),
                HumanMessagePromptTemplate.from_template("{question}")
            ])
            messages = prompt.format_messages(question=question)

            with st.spinner("Thinking..."):
                response = chat_model.invoke(messages)

            answer = response.content
            # st.empty() holds exactly one element, so a second markdown() call
            # would overwrite the first and the "You:" line would never render.
            # Render both turns in a single markdown block instead.
            output_box.markdown(f"π€ **You:** {question}\n\nπ§ **Mentor:** {answer}")
            st.session_state.conversation.append(f"You: {question}")
            st.session_state.conversation.append(f"Mentor: {answer}")

    if st.button("ποΈ Clear Chat"):
        output_box.empty()
        st.session_state.conversation = []

    # Offer the transcript as a plain-text download once there is any history.
    if st.session_state.conversation:
        convo_text = "\n".join(st.session_state.conversation)
        st.download_button(
            "β¬οΈ Download Conversation",
            data=convo_text,
            file_name=f"{mentor_choice}_chat.txt",
            mime="text/plain"
        )
|
|
|