# src/streamlit_app.py
# Innomatics AI Mentor Support — Streamlit chatbot app.
import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint,HuggingFacePipeline,ChatHuggingFace
# --- Secure token loading ---
# The Hugging Face token is stored in the Space secret named "KEYS";
# fail fast at startup if it is missing rather than erroring on first request.
hf_token = os.getenv("KEYS")
if not hf_token:
    raise ValueError("Environment variable 'KEYS' is not set.")
# huggingface_hub reads HF_TOKEN; langchain-huggingface reads
# HUGGINGFACEHUB_API_TOKEN. (The previously-set HUGGINGFACEHUB_API_KEY is not
# consulted by either library, so the correct variable name is set instead.)
os.environ["HF_TOKEN"] = hf_token
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
# Custom CSS
# Injected once into the page via st.markdown(..., unsafe_allow_html=True):
# - full-page background image, with a fixed dark overlay for text readability
# - .button: pill-style anchors used by the `mentoring` navigation block
# - .chat-reply: container styling for the model's rendered answer
custom_css = """
<style>
html, body, [data-testid="stAppViewContainer"] {
background: url('https://wallpapercave.com/wp/wp2581376.jpg') no-repeat center center fixed;
background-size: cover;
font-family: Arial, sans-serif;
color: #f5f5f5;
}
/* Optional overlay for readability */
[data-testid="stAppViewContainer"]::before {
content: "";
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.4); /* dark overlay */
z-index: -1;
}
.button {
display: inline-block;
padding: 10px 20px;
margin: 10px;
font-size: 16px;
text-decoration: none;
color: white;
border-radius: 5px;
background-color:#E1D5FC;
transition: background-color 0.3s ease;
}
.button:hover {
background-color: #e68b7f;
}
.chat-reply {
color: #00ffcc;
background-color: #1e1e1e;
padding: 10px;
border-radius: 8px;
margin-bottom: 10px;
}
</style>
"""
st.markdown(custom_css, unsafe_allow_html=True)
# Navigation links rendered on the landing page. Each anchor sets the ?page=
# query parameter, which the routing block at the bottom of the file
# dispatches on to pick the matching *_page() renderer.
mentoring = """
<div>
<a href="?page=python" class="button">Python</a>
<a href="?page=eda" class="button">Data Analysis</a>
<a href="?page=stats" class="button">Statistics</a>
<a href="?page=ml" class="button">Machine Learning</a>
<a href="?page=dl" class="button">Deep Learning</a>
<a href="?page=excel" class="button">MS Excel</a>
<a href="?page=power_bi" class="button">Power BI</a>
</div>
"""
# --- Chat model setup ---
# HuggingFaceEndpoint carries the generation parameters; ChatHuggingFace is
# only a chat-message wrapper around the endpoint, so the endpoint kwargs are
# not repeated on it (they were previously duplicated, where they have no
# effect on generation).
# max_new_tokens was raised from 10: ten tokens truncates every answer
# mid-sentence, which defeats the "provide a clear solution" prompts below.
llama_model = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    provider="novita",
    temperature=0.7,
    max_new_tokens=512,
    task="conversational",
)
llama = ChatHuggingFace(llm=llama_model)

deepseek_model = HuggingFaceEndpoint(
    repo_id="deepseek-ai/DeepSeek-R1",
    provider="novita",
    temperature=0.7,
    max_new_tokens=512,
    task="conversational",
)
deepseek = ChatHuggingFace(llm=deepseek_model)
from langchain.prompts import ChatPromptTemplate,SystemMessagePromptTemplate,HumanMessagePromptTemplate
# Page definitions
def main_page():
    """Landing page: shows the welcome banner and the module link buttons."""
    heading = "Welcome To Innomatics AI Mentor Support"
    intro = "Here is the list of modules we offer to support you in your learning journey..."
    st.title(heading)
    st.write(intro)
    # `mentoring` is the module-level HTML block of ?page= navigation anchors.
    st.markdown(mentoring, unsafe_allow_html=True)
def _render_mentor_page(title, subject, model):
    """Render one mentoring page and answer the learner's doubt.

    Collects a mentor-experience level and a question, then asks *model* to
    answer as a mentor with that many years of experience in *subject*.
    Extracted because the seven page functions below were byte-for-byte
    copy-paste duplicates differing only in title, subject, and model.

    Parameters
    ----------
    title : str
        Text shown by st.title at the top of the page.
    subject : str
        Domain substituted into the system prompt ("Python", "ML", ...).
    model : ChatHuggingFace
        Chat model used to generate the answer.
    """
    st.title(title)
    exps = st.number_input("Choose a mentor based on their experience: 0-20")
    doubts = st.text_input("Explain your doubt")
    if not doubts:
        return  # nothing to answer until the learner types a doubt
    prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(
            "You have an experience of {exp} years in " + subject + "."
        ),
        HumanMessagePromptTemplate.from_template(
            "Provide a clear solution for the following doubt: {doubt}"
        ),
    ])
    response = model.invoke(prompt.format(exp=exps, doubt=doubts))
    st.markdown("### 🧠 Solution:")
    st.markdown(
        f'<div class="chat-reply">{response.content}</div>',
        unsafe_allow_html=True,
    )


def python_page():
    """Python mentoring page (answered by the Llama model)."""
    _render_mentor_page(
        "Hi, Have doubts in Python? 🤔 Let's solve them together 😎🤩",
        "Python",
        llama,
    )


def eda_page():
    """Data Analysis mentoring page (answered by the DeepSeek model)."""
    _render_mentor_page("Data Analysis Mentoring", "Data Analysis", deepseek)


def stats_page():
    """Statistics mentoring page (answered by the Llama model)."""
    _render_mentor_page("Statistics Mentoring", "Statistical Analysis", llama)


def ml_page():
    """Machine Learning mentoring page (answered by the DeepSeek model)."""
    _render_mentor_page("Machine Learning Mentoring", "ML", deepseek)


def dl_page():
    """Deep Learning mentoring page (answered by the Llama model)."""
    _render_mentor_page("Deep Learning Mentoring", "Deep learning", llama)


def excel_page():
    """MS Excel mentoring page (answered by the DeepSeek model)."""
    _render_mentor_page("MS Excel Mentoring", "Excel", deepseek)


def power_bi_page():
    """Power BI mentoring page (answered by the Llama model)."""
    _render_mentor_page("Power BI Mentoring", "Power BI", llama)
# --- Routing ---
# Map each ?page= query-parameter value to its renderer; any unknown or
# missing value falls back to the landing page.
_PAGES = {
    "python": python_page,
    "eda": eda_page,
    "stats": stats_page,
    "ml": ml_page,
    "dl": dl_page,
    "excel": excel_page,
    "power_bi": power_bi_page,
}
page = st.query_params.get("page", "main")
_PAGES.get(page, main_page)()