File size: 6,832 Bytes
377a4c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c63b2ee
 
 
377a4c5
 
 
 
 
 
c63b2ee
377a4c5
 
 
 
 
 
c63b2ee
 
 
377a4c5
 
 
 
 
 
 
 
 
 
 
 
 
c63b2ee
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
# Unica Chatbot for Q&A
# This chatbot handles `Q&A`, and it is not limited to its trained knowledge: it can also search for relevant, up-to-date information on the internet using `tavily`.
## Installing all the packages
# Adding all packages

## Chatbot Logic
# importing the packages to be used
import logging
import os
import markdown
from dotenv import load_dotenv
import gradio as gr
from langchain_groq import ChatGroq
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage

# Set up logging configuration for the whole script (timestamped INFO lines).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger, per stdlib convention (named after this module).
logger = logging.getLogger(__name__)
# Initializing the API keys from the environment variables.
# Load environment variables from the .env.local file (looked up relative to CWD).
load_dotenv('.env.local')

# Access the variables.
# NOTE(review): os.getenv returns None when a variable is unset; the clients
# below would then fail only at request time — consider failing fast here.
groq_api_key = os.getenv('GROQ_API_KEY')
tavily_api_key = os.getenv('TAVILY_API_KEY')

# LLM Initialization
# Groq-hosted Llama 3.3 70B; temperature=0 for deterministic answers.
llm = ChatGroq(
    model_name="llama-3.3-70b-versatile",
    groq_api_key=groq_api_key,
    temperature=0
)

# Tavily Search engine for LLM: wrapper plus a tool limited to 3 results.
# NOTE(review): search_tool is passed to Agent below, but Agent never
# actually invokes its tools — web search is not wired up yet.
tavilySearch = TavilySearchAPIWrapper(tavily_api_key=tavily_api_key)
search_tool = TavilySearchResults(max_results=3, api_wrapper=tavilySearch)
# System Prompt
# Persona and behavior contract for the assistant; Agent.call_groq prepends
# this verbatim to every conversation. The text is sent to the model as-is,
# so edit with care.
system_prompt = """
You are Unica, a friendly and helpful assistant designed to support students on the Moodle platform. Your primary goal is to provide quick and accurate answers to students' study-related questions, helping them navigate their courses and resources efficiently.

Guidelines:
1. Understand the context of Moodle and the student's coursework.
2. Be concise and clear in your responses.
3. Provide relevant information directly addressing the student's question.
4. Maintain a positive and encouraging tone.
5. Offer study tips when appropriate.
6. Handle unknowns gracefully by suggesting resources or encouraging further inquiry.
7. Respect privacy and maintain a professional demeanor.
8. Encourage engagement with course materials and resources.
9. Use Markdown formatting to enhance the readability of your responses.

Example Responses:
- Student: "How do I submit my assignment on Moodle?"
  Unica: "To submit your assignment, navigate to the course page, find the assignment link, and click on **'Submit assignment'**. Follow the prompts to upload your file. If you encounter any issues, feel free to ask for further assistance!"

- Student: "What are the upcoming deadlines for my course?"
  Unica: "To view upcoming deadlines, check the course calendar or the announcements section on your Moodle dashboard. If you have specific questions about a deadline, it's best to contact your instructor."
"""
# Agent Class
class Agent:
    """Thin wrapper around a chat model that prepends a system prompt.

    NOTE(review): ``tools`` are indexed by name in ``__init__`` but are
    never invoked anywhere in this class — tool calling is not wired up
    yet, so web search is effectively unused.
    """

    def __init__(self, model, tools, system_prompt=""):
        """Store the model, a name->tool map, and the system prompt."""
        self.system_prompt = system_prompt
        self.model = model
        # Index tools by name for (future) dispatch.
        self.tools = {t.name: t for t in tools}

    def call_groq(self, messages):
        """Invoke the model on ``messages``, prefixed by the system prompt.

        Args:
            messages: list of LangChain message objects.

        Returns:
            The model's response message object.
        """
        if self.system_prompt:
            messages = [SystemMessage(content=self.system_prompt)] + messages
        # Lazy %-style args: no formatting cost when INFO logging is disabled.
        logger.info("Calling Groq with messages: %s", messages)
        message = self.model.invoke(messages)
        logger.info("Groq response: %s", message)
        return message

    def handle_query(self, user_query):
        """Answer a single user query string; return the response text."""
        messages = [HumanMessage(content=user_query)]
        response = self.call_groq(messages)
        return response.content

# Initialize Agent
# Single module-level agent instance shared by all UI callbacks below.
agent = Agent(model=llm, tools=[search_tool], system_prompt=system_prompt)

def render_markdown(markdown_text, is_user=False):
    """Convert Markdown text to HTML wrapped in a styled chat bubble.

    Args:
        markdown_text: Markdown source (the user query or assistant reply).
        is_user: selects user-bubble vs. assistant-bubble styling.

    Returns:
        An HTML fragment: the rendered content inside a bubble <div>,
        followed by an inline <style> block. The <style> block is
        re-emitted on every call; duplicate <style> tags are harmless in
        browsers but redundant.

    NOTE(review): markdown.markdown() does not sanitize raw HTML, so
    user-supplied input reaches the page unescaped — a potential XSS
    vector if this app is ever shared beyond a trusted user. Consider
    escaping the input or running the output through a sanitizer.
    """
    # Convert Markdown to HTML
    html_content = markdown.markdown(markdown_text)
    # Wrap the HTML content in a chat bubble layout
    bubble_class = "user-bubble" if is_user else "assistant-bubble"
    bubble_html = f"""
    <div class="{bubble_class}">
        {html_content}
    </div>
    <style>
        .user-bubble {{
            background-color: #e1f5fe;
            border-radius: 15px;
            padding: 10px;
            margin: 10px 0;
            max-width: 70%;
            align-self: flex-end;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
        }}
        .assistant-bubble {{
            background-color: #f1f1f1;
            border-radius: 15px;
            padding: 10px;
            margin: 10px 0;
            max-width: 70%;
            align-self: flex-start;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
        }}
        .chat-container {{
            display: flex;
            flex-direction: column;
            gap: 10px;
            padding: 10px;
        }}
    </style>
    """
    return bubble_html

def handle_user_query(user_query):
    """Run ``user_query`` through the agent and return chat-bubble HTML.

    Args:
        user_query: the student's question as plain text / Markdown.

    Returns:
        An HTML string with both sides of the exchange rendered as chat
        bubbles, or a plain apology string if anything goes wrong — the UI
        never surfaces a traceback to the student.
    """
    try:
        response = agent.handle_query(user_query)
        # Lazy %-style args: no formatting cost when INFO logging is disabled.
        logger.info("Assistant's message: %s", response)
        # Render the Markdown response to HTML with chat bubble layout
        user_html = render_markdown(user_query, is_user=True)
        assistant_html = render_markdown(response, is_user=False)
        chat_html = f"""
        <div class="chat-container">
            {user_html}
            {assistant_html}
        </div>
        """
        return chat_html
    except Exception:
        # Boundary handler: logger.exception records the full traceback,
        # then we degrade gracefully with a user-friendly message.
        logger.exception("Error handling user query")
        return "Sorry, I encountered an error. Please try again."

## Chatbot UI
# Gradio Interface with Interstellar theme
with gr.Blocks(theme='ParityError/Interstellar') as demo:
    # Chat history display plus a two-line input box and a send button.
    chat_window = gr.Chatbot([], label="Chat with Unica")
    message_box = gr.Textbox(
        lines=2,
        placeholder="Ask your study-related questions here",
        label="Your Message",
    )
    submit_btn = gr.Button("Send")

    def on_submit(user_message, history):
        # Ask the agent, append the (question, answer) pair to the history;
        # the empty-string second output clears the textbox.
        reply = agent.handle_query(user_message)
        history.append((user_message, reply))
        return history, ""

    # Send a message either via the button or by submitting the textbox.
    submit_btn.click(on_submit, [message_box, chat_window], [chat_window, message_box])
    message_box.submit(on_submit, [message_box, chat_window], [chat_window, message_box])

if __name__ == "__main__":
    # PWA mode plus a public share link (handy for quick demos).
    demo.launch(pwa=True, share=True)

'''
Note:
If you are running this chatbot yourself, feel free to pass `debug=True` to launch() so that you can track down any bugs quickly.
Happy coding *_*
'''

## Farewell
# To integrate with `Moodle` as a plugin, I believe we can use an *iframe* and embed it as HTML, but we could also try deploying it on `HuggingFace` and then interacting with it through its `API`.
# Let me try all of these approaches to see which is the most pleasant and efficient for the user ^_^ .