import streamlit as st
import os
from langchain_huggingface import HuggingFaceEndpoint, HuggingFacePipeline, ChatHuggingFace

# Secure token loading: fail fast at startup if the deployment forgot the key.
hf_token = os.getenv("KEYS")
if not hf_token:
    raise ValueError("Environment variable 'KEYS' is not set.")
os.environ["HF_TOKEN"] = hf_token
# huggingface_hub / langchain-huggingface read HUGGINGFACEHUB_API_TOKEN
# (not *_API_KEY) — set the correct name so the endpoint authenticates.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token  # kept for backward compatibility
# Custom CSS injected into the page below via st.markdown(unsafe_allow_html=True).
# NOTE(review): the stylesheet body is empty — it may have been stripped from
# this copy of the file; confirm against the original.
custom_css = """
"""
st.markdown(custom_css, unsafe_allow_html=True)
# Landing-page HTML/markdown listing the mentoring modules (rendered by
# main_page). Also an empty placeholder in this copy — confirm.
mentoring = """
"""
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate

# Shared chat models used by the page handlers below.
# Generation parameters (repo, provider, sampling, token budget) live on the
# HuggingFaceEndpoint; ChatHuggingFace only wraps an existing endpoint via
# `llm=` — repo_id/provider/etc. are not part of its constructor interface.
# NOTE(review): max_new_tokens=10 truncates every answer after ~10 tokens,
# which makes the mentoring replies unusably short — confirm this is intended.
llama_model = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    provider="novita",
    temperature=0.7,
    max_new_tokens=10,
    task="conversational",
)
llama = ChatHuggingFace(llm=llama_model)

deepseek_model = HuggingFaceEndpoint(
    repo_id="deepseek-ai/DeepSeek-R1",
    provider="novita",
    temperature=0.7,
    max_new_tokens=10,
    task="conversational",
)
deepseek = ChatHuggingFace(llm=deepseek_model)
# Page definitions
def main_page():
    """Landing page: greet the learner and render the module catalogue.

    Reads the module-level ``mentoring`` HTML/markdown string.
    """
    st.title("Welcome To Innomatics AI Mentor Support")
    st.write("Here is the list of modules we offer to support you in your learning journey...")
    # The catalogue is raw HTML, so unsafe_allow_html must stay enabled.
    st.markdown(mentoring, unsafe_allow_html=True)
def python_page():
    """Python mentoring page: collect a doubt and answer it with the Llama model."""
    st.title("Hi, Have doubts in Python? 🤔 Let's solve them together 😎🤩")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20 years",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Python."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the Llama endpoint and display its answer.
        response = llama.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def eda_page():
    """Data Analysis mentoring page: collect a doubt and answer it with DeepSeek."""
    st.title("Data Analysis Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Data Analysis."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the DeepSeek endpoint and display its answer.
        response = deepseek.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def stats_page():
    """Statistics mentoring page: collect a doubt and answer it with the Llama model."""
    st.title("Statistics Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Statistical Analysis."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the Llama endpoint and display its answer.
        response = llama.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def ml_page():
    """Machine Learning mentoring page: collect a doubt and answer it with DeepSeek."""
    st.title("Machine Learning Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in ML."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the DeepSeek endpoint and display its answer.
        response = deepseek.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def dl_page():
    """Deep Learning mentoring page: collect a doubt and answer it with the Llama model."""
    st.title("Deep Learning Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Deep learning."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the Llama endpoint and display its answer.
        response = llama.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def excel_page():
    """MS Excel mentoring page: collect a doubt and answer it with DeepSeek."""
    st.title("MS Excel Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Excel."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the DeepSeek endpoint and display its answer.
        response = deepseek.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
def power_bi_page():
    """Power BI mentoring page: collect a doubt and answer it with the Llama model."""
    st.title("Power BI Mentoring")
    # Bound the widget to the 0-20 range the label advertises.
    exps = st.number_input("Choose a mentor based on their experience: 0-20",
                           min_value=0.0, max_value=20.0)
    doubts = st.text_input("Explain your doubt")
    if doubts:
        # System message sets the mentor persona; human message carries the question.
        messages = [
            SystemMessagePromptTemplate.from_template("You have an experience of {exp} years in Power BI."),
            HumanMessagePromptTemplate.from_template("Provide a clear solution for the following doubt: {doubt}"),
        ]
        prompt = ChatPromptTemplate.from_messages(messages)
        formatted_prompt = prompt.format(exp=exps, doubt=doubts)
        # Query the Llama endpoint and display its answer.
        response = llama.invoke(formatted_prompt)
        st.markdown("### 🧠Solution:")
        # Fixed: the original split a single-quoted f-string across two lines,
        # which is a syntax error.
        st.markdown(f"{response.content}\n", unsafe_allow_html=True)
# Routing: dispatch on the ?page= query parameter; unknown or missing values
# fall back to the landing page, exactly as the original if/elif chain did.
_PAGE_HANDLERS = {
    "python": python_page,
    "eda": eda_page,
    "stats": stats_page,
    "ml": ml_page,
    "dl": dl_page,
    "excel": excel_page,
    "power_bi": power_bi_page,
}
page = st.query_params.get("page", "main")
_PAGE_HANDLERS.get(page, main_page)()