File size: 4,590 Bytes
7e0f06b
 
 
 
 
 
 
53277c4
 
7e0f06b
 
d988920
7e0f06b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7fe0cac
d988920
7e0f06b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import os
import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# ---------- Hugging Face API token ----------
# Read the secret once and fan it out to the env vars the HF/LangChain stack
# checks. NOTE(review): the secret is stored under the env-var name "key" —
# confirm that is intentional (it is unusually generic).
HF_TOKEN = os.getenv("key")
if HF_TOKEN:
    # Only set these when the token exists: assigning None to os.environ
    # raises TypeError and would crash the app at startup.
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN
    os.environ["HF_TOKEN"] = HF_TOKEN

# ---------- Page Configuration ----------
# Must be the first Streamlit call after import; sets browser-tab title/icon
# and centers the content column.
st.set_page_config(page_title="AI Visionary by Innomatics", page_icon="๐Ÿง ", layout="centered")

# ---------- Custom CSS ----------
# Injected raw into the page (unsafe_allow_html=True is required for <style>).
# Styles every st.button as white-on-dark-red with an inverted hover state,
# and forces all common text elements to black regardless of theme.
st.markdown("""
<style>
    .main {background-color: transparent; padding: 20px;}
    .stButton>button {
        background-color: white; color: #8B0000; border: 2px solid white;
        border-radius: 10px; padding: 10px 20px; font-size: 18px;
        font-weight: bold; width: 100%; transition: 0.3s ease-in-out;
    }
    .stButton>button:hover {
        background-color: #8B0000; color: white; border: 2px solid white;
    }
    h1, h2, h3, p, div, span, label, input, textarea {
        color: black !important;
    }
</style>
""", unsafe_allow_html=True)

# ---------- UI Header ----------
# Static page header and intro copy shown above the module picker.
st.markdown("<h1 style='text-align: center'>AI Visionary by Innomatics๐Ÿง </h1>", unsafe_allow_html=True)
st.markdown("### ๐Ÿ‘‹ Welcome to the AI Visionary by Innomatics ๐Ÿค–")
st.markdown("""
This dashboard provides an AI mentor that gives instant, skill-adapted help 
with Python, SQL, PowerBI, and data science to guide you through module doubts.
""")
st.markdown("## In which module do you have doubt?")

# ---------- Module Buttons ----------
# Course-module name -> emoji shown on its picker button.
modules = {
    "Python": "๐Ÿ",
    "SQL": "๐Ÿ—ƒ๏ธ",
    "PowerBI": "๐Ÿ“Š",
    "Statistics": "๐Ÿ“ˆ",
    "Machine_Learning": "๐Ÿค–",
    "Deep_Learning": "๐Ÿง "
}

# Lay the buttons out three per row; clicking one records the chosen module
# (and its emoji) in session state so the chat section below can render it.
cols = st.columns(3)
for idx, (name, icon) in enumerate(modules.items()):
    target_col = cols[idx % 3]
    clicked = target_col.button(f"{icon} {name}", key=f"{name}_btn")
    if clicked:
        st.session_state.mentor_type = name
        st.session_state.mentor_emoji = icon

# ---------- Session State Defaults ----------
# setdefault only fills missing keys, so a click registered above survives.
st.session_state.setdefault("mentor_type", None)
st.session_state.setdefault("mentor_emoji", "๐Ÿง ")

# ---------- Chat Interface ----------
# Rendered only after the user has picked a module above.
if st.session_state.mentor_type:
    mentor = st.session_state.mentor_type
    emoji = st.session_state.mentor_emoji

    st.subheader(f"{emoji} {mentor.upper()} Mentor Chat")
    experience = st.slider("Your experience (in years):", 0, 20, 1)
    user_input = st.text_input("Ask your question:")
    # Single-slot placeholder for the conversation output; each write replaces
    # whatever it previously held.
    output_container = st.empty()

    # Select HuggingFace model based on module: module -> (repo_id, provider).
    model_map = {
        "Python": ("meta-llama/Llama-3.1-8B-Instruct", "nebius"),
        "SQL": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "PowerBI": ("deepseek-ai/DeepSeek-R1", "nebius"),
        "Statistics": ("meta-llama/Llama-3.2-1B-Instruct", "nebius"),
        "Machine_Learning": ("meta-llama/Llama-3.3-70B-Instruct", "nebius"),
        "Deep_Learning": ("meta-llama/Meta-Llama-3-70B-Instruct", "hyperbolic")
    }

    repo_id, provider = model_map.get(mentor, (None, None))
    if repo_id:
        # NOTE(review): the endpoint wrapper is rebuilt on every Streamlit
        # rerun; consider @st.cache_resource if construction proves slow.
        model = HuggingFaceEndpoint(repo_id=repo_id, provider=provider, temperature=0.5, max_new_tokens=150)
        chat_model = ChatHuggingFace(llm=model)

        col1, col2 = st.columns(2)

        with col1:
            if st.button("๐Ÿš€ Ask", key="ask_btn"):
                if user_input:
                    # System prompt adapts the mentor persona to the chosen
                    # module and the learner's stated experience.
                    prompt = ChatPromptTemplate.from_messages([
                        SystemMessagePromptTemplate.from_template(
                            f"You are a helpful and experienced {mentor.upper()} mentor {emoji} assisting a learner with {experience} years of experience."
                        ),
                        HumanMessagePromptTemplate.from_template("{question}")
                    ])
                    formatted_prompt = prompt.format_messages(question=user_input)

                    with st.spinner(f"{emoji} Mentor is thinking..."):
                        try:
                            response = chat_model.invoke(formatted_prompt)
                            # BUG FIX: st.empty() holds exactly one element, so
                            # two consecutive .markdown() calls overwrite each
                            # other and the "You:" echo was lost. Render both
                            # lines in a single call instead.
                            output_container.markdown(
                                f"**๐Ÿ‘ค You:** {user_input}\n\n**{emoji} Mentor:** {response.content}"
                            )
                        except Exception as e:
                            output_container.error(f"โŒ An error occurred: {str(e)}")
                else:
                    output_container.warning("โš ๏ธ Please enter a question first!")

        with col2:
            if st.button("๐Ÿงน Clear", key="clear_btn"):
                output_container.empty()