File size: 12,059 Bytes
88f3203
c6202c3
1a347be
4b1a0dd
c6202c3
 
 
 
 
a3aef29
c6202c3
 
 
4b1a0dd
 
 
 
a3aef29
4b1a0dd
 
 
 
c6202c3
 
a3aef29
c6202c3
 
a3aef29
c6202c3
 
a3aef29
c6202c3
 
 
a3aef29
c6202c3
 
 
 
 
 
 
 
a3aef29
c6202c3
 
 
a3aef29
b10dd27
c6202c3
 
 
 
 
 
 
 
a3aef29
c6202c3
 
 
a3aef29
c6202c3
a3aef29
c6202c3
 
 
b10dd27
 
c6202c3
 
a3aef29
c6202c3
a3aef29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c6202c3
 
a3aef29
 
 
 
 
 
 
 
 
 
 
 
c6202c3
 
a3aef29
 
 
1a347be
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a3aef29
 
 
 
 
4b1a0dd
 
 
 
 
 
 
 
 
adb30ed
4b1a0dd
 
 
 
a3aef29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4b1a0dd
27e4b47
c6202c3
ea8ef30
a3aef29
ea8ef30
a3aef29
 
e10aa7b
a3aef29
 
ea8ef30
 
d06e7bd
ea8ef30
d06e7bd
ea8ef30
a3aef29
 
ea8ef30
d06e7bd
a3aef29
 
d06e7bd
a3aef29
 
6010f51
ea8ef30
6010f51
d06e7bd
 
 
6010f51
 
e10aa7b
ea8ef30
 
a3aef29
 
6010f51
e10aa7b
a3aef29
e10aa7b
 
 
 
 
d06e7bd
e10aa7b
 
ea8ef30
a3aef29
 
e10aa7b
a3aef29
e10aa7b
 
 
 
 
d06e7bd
e10aa7b
 
ea8ef30
a3aef29
 
e10aa7b
a3aef29
 
 
 
ea8ef30
d06e7bd
b10dd27
d06e7bd
ea8ef30
e10aa7b
 
0a58c6d
 
e10aa7b
 
 
 
 
ea8ef30
0a58c6d
e10aa7b
 
0a58c6d
e10aa7b
 
d06e7bd
0a58c6d
 
 
 
 
 
 
 
 
6010f51
 
 
 
d06e7bd
 
6010f51
 
 
 
 
 
d06e7bd
a3aef29
 
ea8ef30
e10aa7b
ea8ef30
 
 
6010f51
ea8ef30
 
 
 
 
 
6010f51
ea8ef30
 
 
 
0a58c6d
 
 
ea8ef30
 
 
6010f51
 
 
ea8ef30
a3aef29
 
 
e10aa7b
 
 
a3aef29
e10aa7b
a3aef29
 
 
 
 
 
 
e10aa7b
 
 
 
 
a3aef29
 
e10aa7b
 
 
 
0a58c6d
a3aef29
0a58c6d
e10aa7b
 
 
29ce07f
c6202c3
a3aef29
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
import gradio as gr
from langchain.prompts import ChatPromptTemplate
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from huggingface_hub import InferenceClient
import os
import time
import logging
import re

# --- Environment and Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Support both token names for flexibility
# (HF_TOKEN is the current Hugging Face convention; HUGGINGFACEHUB_API_TOKEN
# is the legacy LangChain name). With neither set, API calls will likely fail.
hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    logger.warning("Neither HF_TOKEN nor HUGGINGFACEHUB_API_TOKEN is set, the application may not work.")

# --- LLM Configuration ---
# Inference is routed through the "together" provider of the HF Inference API.
client = InferenceClient(
    provider="together",
    api_key=hf_token,
)
# Subject-specific prompt templates. Each takes a shared {system_message}
# persona plus the user's {question}; detect_subject() selects one per query.

# Math: concept-focused tutoring — explains process, avoids direct answers.
math_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are an expert math tutor. For every math problem:
1. Break it down into key concepts
2. Briefly explain concepts
3. Outline the process for solving a similar problem
Be comprehensive and educational. Structure your response clearly."""),
    ("human", "{question}")
])

# Research: sources, citations, methodology, academic writing.
research_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are a research skills mentor. Help students with:
- Determining the validity of sources
- Evaluating source credibility and bias if a source is mentioned
- Proper citation formats (APA, MLA, Chicago, etc.)
- Research strategies and methodologies
- Academic writing techniques and structure
- Database navigation and search strategies
Provide detailed, actionable advice with specific examples."""),
    ("human", "{question}")
])

# Study skills: learning techniques, time management, quiz coaching.
study_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are a study skills coach. Help students with:
- Effective study methods for different learning styles
- Time management and scheduling techniques
- Memory techniques and retention strategies
- Test preparation and exam strategies
- Note-taking methods and organization
- Learning style optimization
- Offer short quiz sessions where you pose one to two questions at a time, then provide feedback on the students answers.
Provide comprehensive, personalized advice with practical examples."""),
    ("human", "{question}")
])

# General fallback persona used when no subject keywords match.
general_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are EduBot, a comprehensive AI learning assistant. You help students with:
๐Ÿ“ Mathematics (Concise explanations rooted in understanding the concepts and process rather than answering the math problem directly)
๐Ÿ” Research skills (source guidance, research advice, evaluation, and citation)
๐Ÿ“š Study strategies (effective learning techniques and exam preparation)
๐Ÿ› ๏ธ Educational tools (guidance on learning resources and technologies)
Always be encouraging, patient, thorough, and comprehensive."""),
    ("human", "{question}")
])

# --- Core Logic Functions ---
def detect_subject(message):
    """Choose the prompt template and mode label matching the user's message.

    Performs a case-insensitive keyword scan; subjects are checked in order
    (math, research, study) and the general template is the fallback.
    Returns a (ChatPromptTemplate, mode_label) pair.
    """
    text = message.lower()

    # Ordered (keywords, template, label) dispatch table; first match wins.
    subject_table = (
        (('math', 'solve', 'calculate', 'equation', 'formula', 'algebra',
          'geometry', 'calculus', 'derivative', 'integral', 'theorem', 'proof'),
         math_template, "๐Ÿงฎ Math Mode"),
        (('research', 'source', 'citation', 'bibliography', 'reference',
          'academic', 'paper', 'essay', 'thesis', 'database', 'journal'),
         research_template, "๐Ÿ” Research Mode"),
        (('study', 'memorize', 'exam', 'test', 'quiz', 'review', 'learn',
          'remember', 'focus', 'motivation', 'notes'),
         study_template, "๐Ÿ“š Study Mode"),
    )

    for keywords, template, label in subject_table:
        if any(word in text for word in keywords):
            return template, label
    return general_template, "๐ŸŽ“ General Mode"

def smart_truncate(text, max_length=3000):
    """Truncate ``text`` to roughly ``max_length`` characters, cutting cleanly.

    Prefers cutting at the end of the last complete sentence within the
    limit; falls back to the last complete word, and finally to a hard
    character cut for unbroken text. A continuation marker is appended
    whenever truncation occurs.

    Args:
        text: The string to truncate.
        max_length: Maximum number of content characters to keep.

    Returns:
        ``text`` unchanged if it fits, otherwise a truncated copy ending
        with the "[Response truncated ...]" marker.
    """
    if len(text) <= max_length:
        return text

    marker = "... [Response truncated - ask for continuation]"

    # Prefer a clean cut at the last full sentence boundary.
    sentences = re.split(r'(?<=[.!?])\s+', text[:max_length])
    if len(sentences) > 1:
        return ' '.join(sentences[:-1]) + marker

    # Fall back to cutting at the last full word.
    words = text[:max_length].split()
    if len(words) > 1:
        return ' '.join(words[:-1]) + marker

    # Bug fix: for unbroken text (no sentence breaks or whitespace in the
    # window) the original returned ONLY the marker, losing all content.
    # Do a plain character cut instead.
    return text[:max_length] + marker

def respond_with_enhanced_streaming(message, history):
    """Yield progressively longer chunks of the bot's reply for streaming UI.

    Detects the subject of ``message``, formats the matching prompt
    template, performs one LLM generation call, then re-yields the result
    a few words at a time to simulate streaming in the Gradio chat window.

    Args:
        message: The user's current question.
        history: Prior chat turns as {"role", "content"} dicts.
            NOTE(review): ``history`` is currently NOT forwarded to the
            model — each reply is generated from ``message`` alone. The
            original version built LangChain SystemMessage/HumanMessage/
            AIMessage objects from it but never used them; that dead code
            has been removed. Wire ``history`` into the prompt if
            multi-turn context is desired.

    Yields:
        Markdown strings: a "Generating..." placeholder, partial responses,
        then the final full response (or an error message on failure).
    """
    try:
        template, mode = detect_subject(message)

        yield f"*{mode}*\n\nGenerating response..."

        logger.info(f"Processing {mode} query: {message[:50]}...")

        # Format the subject-specific template with the shared system persona.
        formatted_prompt = template.format(
            question=message,
            system_message="You are EduBot, an expert AI learning assistant. Provide comprehensive, educational responses that help students truly understand concepts."
        )

        # Single (non-streaming) generation call via the Together provider.
        response = client.text_generation(
            formatted_prompt,
            model="meta-llama/Meta-Llama-3.1-8B-Instruct",
            max_new_tokens=1024,
            temperature=0.7,
            top_p=0.9,
        )

        # Keep the reply bounded so the UI stays responsive.
        response = smart_truncate(response, max_length=3000)

        # Re-yield the response a few words at a time for a typing effect.
        words = response.split()
        partial_response = f"*{mode}*\n\n"

        for i, word in enumerate(words):
            partial_response += word + " "
            # Update the stream every 4th word to limit UI churn.
            if i % 4 == 0:
                yield partial_response
                time.sleep(0.03)

        final_response = f"*{mode}*\n\n{response}"
        logger.info(f"Response completed. Length: {len(response)} characters")
        yield final_response

    except Exception as e:
        logger.exception("Error in response generation")
        yield f"Sorry, I encountered an error: {str(e)}"

# --- Gradio UI CSS ---
# Theme: warm beige background, orange bot bubbles, light user bubbles,
# hidden avatars, dark rounded buttons. Injected via gr.Blocks(css=...).
custom_css = """
/* Main container styling */
.gradio-container {
    background-color: rgb(240, 236, 230) !important;
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

/* Title styling */
.title-header {
    background-color: rgb(240, 236, 230);
    padding: 20px;
    border-bottom: 2px solid rgba(28, 18, 5, 0.1);
    text-align: left;
}

.title-header h1 {
    font-size: 1.8rem;
    font-weight: bold;
    color: black;
    margin: 0;
}

/* Chat container */
.chat-container {
    min-height: 500px;
    background-color: rgb(240, 236, 230);
}

/* Chatbot styling */
.gradio-chatbot {
    background-color: transparent !important;
    border: none !important;
    padding: 20px !important;
}

/* Message styling */
.gradio-chatbot .message.bot .markdown {
    background-color: rgb(240, 185, 103) !important;
    color: black !important;
    border-radius: 18px !important;
    padding: 12px 16px !important;
    box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important;
    border: none !important;
    max-width: 70%;
    margin-left: 0;
    margin-right: auto;
    word-wrap: break-word;
}

.gradio-chatbot .message.user .markdown {
    background-color: rgb(242, 238, 233) !important;
    color: black !important;
    border-radius: 18px !important;
    padding: 12px 16px !important;
    box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important;
    border: none !important;
    max-width: 70%;
    margin-left: auto;
    margin-right: 0;
    word-wrap: break-word;
}

/* Hide avatars */
.gradio-chatbot .avatar-container {
    display: none !important;
}

/* Input section styling */
.input-section {
    background-color: rgb(240, 236, 230);
    border-top: 2px solid rgba(28, 18, 5, 0.1);
    padding: 20px;
}

/* Button styling */
.clear-button, .send-button {
    background-color: rgb(28, 18, 5) !important;
    color: white !important;
    border: none !important;
    border-radius: 10px !important;
    padding: 8px 16px !important;
    cursor: pointer !important;
    margin: 5px !important;
}

.clear-button:hover, .send-button:hover {
    background-color: rgba(28, 18, 5, 0.8) !important;
}

.send-button {
    background-color: rgb(51, 102, 204) !important;
}

.send-button:hover {
    background-color: rgba(51, 102, 204, 0.8) !important;
}

/* Textbox styling - keep it simple */
.input-textbox {
    background-color: rgb(242, 238, 233) !important;
    border: 2px solid rgb(28, 18, 5) !important;
    border-radius: 20px !important;
}

.input-textbox textarea {
    background-color: transparent !important;
    border: none !important;
    color: black !important;
    padding: 15px !important;
    font-size: 16px !important;
}
"""

# Create the interface with proper structure
with gr.Blocks(css=custom_css, title="EduBot") as demo:
    # Title Section
    gr.HTML('<div class="title-header"><h1>๐ŸŽ“ EduBot</h1></div>')

    # Chat Section: "messages" type expects {"role", "content"} dicts,
    # matching what respond_and_update() appends to the history.
    with gr.Row(elem_classes=["chat-container"]):
        chatbot = gr.Chatbot(
            type="messages",
            show_copy_button=True,
            show_share_button=False,
            avatar_images=None,  # avatars are also hidden via custom CSS
            height=500
        )

    # Input Section: Clear/Send buttons above a multi-line textbox.
    with gr.Column(elem_classes=["input-section"]):
        with gr.Row():
            clear = gr.Button("Clear", elem_classes=["clear-button"])
            send = gr.Button("Send", elem_classes=["send-button"])
        msg = gr.Textbox(
            placeholder="Ask me about math, research, study strategies, or any educational topic...",
            show_label=False,
            lines=3,
            max_lines=8,
            elem_classes=["input-textbox"]
        )

    def respond_and_update(message, history):
        """Gradio event handler: append the user turn, then stream the reply.

        Args:
            message: Text from the input textbox.
            history: Chat history in Gradio "messages" format
                (list of {"role", "content"} dicts).

        Yields:
            (history, textbox_value) tuples; the textbox is cleared on the
            first yield and kept empty while the reply streams in.
        """
        # Bug fix: this function is a generator, so the original
        # `return history, ""` for empty input ended iteration WITHOUT
        # delivering any outputs to Gradio; yield the unchanged state instead.
        if not message.strip():
            yield history, ""
            return

        # Show the user's message immediately and clear the textbox.
        history.append({"role": "user", "content": message})
        yield history, ""

        # Stream the bot's response, updating the last history entry in place.
        for response_chunk in respond_with_enhanced_streaming(message, history):
            if history[-1]["role"] == "user":
                # First chunk: add a new assistant entry.
                history.append({"role": "assistant", "content": response_chunk})
            else:
                # Subsequent chunks: overwrite the partial assistant entry.
                history[-1] = {"role": "assistant", "content": response_chunk}
            yield history, ""

    def clear_chat():
        """Reset the conversation: empty chat history and a blank textbox."""
        empty_history, empty_textbox = [], ""
        return empty_history, empty_textbox

    # Wire events: Enter in the textbox and the Send button both submit,
    # streaming (history, textbox) updates from the generator handler.
    msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
    send.click(respond_and_update, [msg, chatbot], [chatbot, msg])

    # Clear button resets both the chat history and the textbox.
    clear.click(clear_chat, outputs=[chatbot, msg])

# Launch the app when run as a script; debug=True surfaces errors in the UI.
if __name__ == "__main__":
    logger.info("Starting EduBot...")
    demo.launch(debug=True)