# CHAT_BOTS / src/streamlit_app.py
# Streamlit app: AI-Powered Mentor Assistant (Hugging Face Space).
import os
import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# Propagate the Space secret "keys" to the env vars the HF libraries read.
# os.getenv() returns None when the secret is missing, and assigning None to
# os.environ raises TypeError — so only set the vars when the token exists.
_hf_token = os.getenv("keys")
if _hf_token:
    os.environ['HUGGINGFACEHUB_API_TOKEN'] = _hf_token
    os.environ['HF_TOKEN'] = _hf_token
# Page chrome. NOTE: st.set_page_config must be the first Streamlit call.
st.set_page_config(page_title="AI-Powered Mentor Assistant", page_icon="🧠")
st.title("🧠AI-Powered Mentor Assistant")

st.markdown("### πŸŽ“ **Select Your Mentor:**")
# One column per mentor domain for the icon banner below.
# (The original created a second, unused st.columns(6) row and re-imported
# streamlit mid-file; both duplicates are removed here.)
col1, col2, col3, col4, col5, col6 = st.columns(6)
# Render the mentor-domain banner: each column gets an icon + bold label.
_banner_cells = (
    (col1, "#### 🐍<br><strong>Python</strong>"),
    (col2, "#### πŸ€–<br><strong>Machine Learning</strong>"),
    (col3, "#### 🧠<br><strong>Deep Learning</strong>"),
    (col4, "#### πŸ“Š<br><strong>Statistics</strong>"),
    (col5, "#### πŸ“ˆ<br><strong>Data Analysis</strong>"),
    (col6, "#### πŸ—„οΈ<br><strong>SQL & Power BI</strong>"),
)
for _column, _cell_html in _banner_cells:
    with _column:
        st.markdown(_cell_html, unsafe_allow_html=True)
# Styled section header; the selectbox itself carries an empty label.
st.markdown("""
<h4 style='color:#4CAF50; font-family:sans-serif;'>
πŸš€ <strong>Select Your Mentor Domain:</strong>
</h4>
""", unsafe_allow_html=True)

# Dropdown of mentor domains; the leading empty entry means "none selected".
_domain_options = [
    "",
    "🐍 Python",
    "πŸ€– Machine Learning",
    "🧠 Deep Learning",
    "πŸ“Š Statistics",
    "πŸ“ˆ Data Analysis",
    "πŸ—„οΈ SQL & Power BI",
]
mentor_type = st.selectbox("", _domain_options)
if mentor_type:
    # Selectbox options look like "<emoji> <Domain>"; drop the emoji prefix so
    # routing below keys on the plain domain name, not the exact emoji bytes.
    domain = mentor_type.split(" ", 1)[-1]

    st.subheader(f"🧠 {domain.upper()} Mentor Chat")
    experience = st.slider("Your experience (in years):", 0, 20, 1)
    user_input = st.text_input("Ask your question:")
    output_container = st.empty()

    # Map each mentor domain to its (repo_id, provider).
    # BUGFIX: the original elif chain compared mentor_type against snake_case
    # strings ("python", "machine_learning", ...) that could never equal the
    # selectbox options, and then unconditionally overwrote `model` with the
    # hyperbolic endpoint — so every domain silently used the same model.
    _model_by_domain = {
        "Python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
        "Machine Learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "Deep Learning": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "Statistics": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
        "Data Analysis": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
        "SQL & Power BI": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic"),
    }
    # Fall back to the original default endpoint for any unmapped selection.
    repo_id, provider = _model_by_domain.get(
        domain, ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic")
    )
    model = HuggingFaceEndpoint(
        repo_id=repo_id,
        provider=provider,
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # BUGFIX: ChatHuggingFace only wraps the endpoint; repo_id/provider/
    # temperature/max_new_tokens/task are not ChatHuggingFace fields and
    # passing them raises a pydantic validation error. Generation settings
    # already live on `model`.
    chat_model = ChatHuggingFace(llm=model)

    if st.button("Ask") and user_input:
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(
                f"You are a helpful and experienced {domain.upper()} mentor "
                f"assisting a learner with {experience} years of experience."
            ),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        formatted_prompt = prompt.format_messages(question=user_input)
        with st.spinner("Mentor is thinking..."):
            response = chat_model.invoke(formatted_prompt)
        # BUGFIX: st.empty() holds a single element, so two successive
        # markdown() calls left only the mentor line visible; render both
        # lines in one write.
        output_container.markdown(
            f"👤 You: {user_input}\n\n🧠 Mentor: {response.content}"
        )

    if st.button("Clear Output"):
        output_container.empty()