# Ai-Mentor — src/pages/model.py
# DOMMETI's picture
# Update src/pages/model.py
# 4057ece verified
import os
import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Authenticate with Hugging Face. The token lives in the "key" secret/env var.
# Guard against a missing secret: os.environ values must be str, and assigning
# None raises TypeError before the app even renders.
_hf_token = os.getenv("key")
if _hf_token:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_token
    os.environ['HF_TOKEN'] = _hf_token
# Page configuration must come before any other Streamlit rendering call.
st.set_page_config(page_title="πŸ‘¨β€πŸ« Multi-Mentor Chat", page_icon="🧠")

# --- Custom CSS for styling ---
# Kept in a module-level constant so the style sheet is easy to locate and edit.
_PAGE_CSS = """
<style>
body {
background: linear-gradient(135deg, #1f2937, #111827);
color: #e0e7ff;
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
h1, h2, h3 {
text-align: center;
color: #00FFFF;
text-shadow: 0 0 12px #00FFFFaa;
font-weight: 700;
margin-bottom: 0.5rem;
}
.mentor-columns div {
text-align: center;
padding: 10px 0;
font-weight: 600;
font-size: 1rem;
border-radius: 8px;
user-select: none;
}
.mentor-columns .col1 { color: #00FF00; } /* Green */
.mentor-columns .col2 { color: #FFA500; } /* Orange */
.mentor-columns .col3 { color: #1E90FF; } /* Dodger Blue */
.mentor-columns .col4 { color: #8A2BE2; } /* BlueViolet */
.mentor-columns .col5 { color: #FFD700; } /* Gold */
.mentor-columns .col6 { color: #FF4500; } /* OrangeRed */
.selectbox-container {
max-width: 400px;
margin: 0 auto 15px auto;
}
.slider-container {
max-width: 400px;
margin: 15px auto 20px auto;
}
.input-container {
max-width: 700px;
margin: 10px auto 30px auto;
}
.button-row {
text-align: center;
margin-bottom: 30px;
}
button.stButton > button {
background: linear-gradient(90deg, #00FFFF, #00CCFF);
border: none;
padding: 12px 30px;
color: #111;
font-weight: 700;
border-radius: 25px;
box-shadow: 0 0 12px #00FFFFcc;
transition: transform 0.3s ease, box-shadow 0.3s ease;
}
button.stButton > button:hover {
transform: scale(1.1);
box-shadow: 0 0 20px #00FFFFee;
}
.output-container {
max-width: 700px;
margin: 0 auto 40px auto;
background: rgba(0, 255, 255, 0.1);
padding: 20px;
border-radius: 15px;
box-shadow: 0 0 12px #00FFFF55;
white-space: pre-wrap;
font-size: 1.1rem;
line-height: 1.4;
color: #e0f7ff;
min-height: 80px;
}
</style>
"""
# Inject the style sheet into the page (raw HTML, hence unsafe_allow_html).
st.markdown(_PAGE_CSS, unsafe_allow_html=True)
# --- Title and Mentor Labels ---
st.title("🧠 Multi-Topic Mentor")
# Render the six mentor labels as a flex row; the fragments are joined into
# the exact same HTML string the page has always emitted.
_mentor_label_row = [
    '<div class="mentor-columns" style="display:flex; justify-content: space-around; margin-bottom:15px;">',
    '<div class="col1">🟒 Python</div>',
    '<div class="col2">🟠 ML</div>',
    '<div class="col3">πŸ”΅ DL</div>',
    '<div class="col4">🟣 Stats</div>',
    '<div class="col5">🟑 Data_Analysis</div>',
    '<div class="col6">πŸ”΄ SQL & PowerBI</div>',
    '</div>',
]
st.markdown(''.join(_mentor_label_row), unsafe_allow_html=True)
# --- Mentor selectbox ---
# The leading "" option means "nothing selected yet"; the chat UI below only
# renders once a real mentor is picked. NOTE: "data_anaylasis" is kept as-is
# because it is the runtime key matched by the model-selection logic.
_MENTOR_OPTIONS = [
    "",
    "python",
    "machine_learning",
    "deep_learning",
    "stats",
    "data_anaylasis",
    "sql and powerbi",
]
st.markdown('<div class="selectbox-container">', unsafe_allow_html=True)
mentor_type = st.selectbox("Choose a mentor:", _MENTOR_OPTIONS)
st.markdown('</div>', unsafe_allow_html=True)
if mentor_type:
    st.subheader(f"🧠 {mentor_type.upper()} Mentor Chat")

    st.markdown('<div class="slider-container">', unsafe_allow_html=True)
    experience = st.slider("Your experience (in years):", 0, 20, 1)
    st.markdown('</div>', unsafe_allow_html=True)

    st.markdown('<div class="input-container">', unsafe_allow_html=True)
    user_input = st.text_input("Ask your question:")
    st.markdown('</div>', unsafe_allow_html=True)

    # Placeholder that holds exactly one element; writing to it replaces
    # whatever was shown before.
    output_container = st.empty()

    # Map each mentor to its (repo_id, provider). Replaces the repetitive
    # if/elif chain; every endpoint shares the same sampling settings.
    # The "data_anaylasis" key intentionally matches the selectbox option.
    _MENTOR_MODELS = {
        "python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
        "machine_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "deep_learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "stats": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
        "data_anaylasis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
        "sql and powerbi": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
    }
    repo_id, provider = _MENTOR_MODELS[mentor_type]
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # ChatHuggingFace only needs the wrapped endpoint. The original also passed
    # repo_id/provider/temperature/... again — duplicating endpoint settings —
    # and its hard-coded provider="nebius" contradicted the "hyperbolic"
    # provider chosen for the SQL & PowerBI mentor.
    chat_model = ChatHuggingFace(llm=model)

    # Buttons row
    st.markdown('<div class="button-row">', unsafe_allow_html=True)
    if st.button("Ask") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {mentor_type.upper()} mentor assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}")
        ])
        formatted_prompt = prompt.format_messages(question=user_input)
        with st.spinner("Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)
        # Single markdown call: two successive .markdown() calls on st.empty()
        # (as the original did) overwrite each other, so the "You:" line was
        # never visible to the user.
        output_container.markdown(
            f"**πŸ‘€ You:** {user_input}\n\n**🧠 Mentor:** {response.content}"
        )
    if st.button("Clear Output"):
        output_container.empty()
    st.markdown('</div>', unsafe_allow_html=True)