import gradio as gr
import os
from pathlib import Path
import fitz  # PyMuPDF
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import anthropic
from PIL import Image
import io
import re

# --- Minimal PDF Search & Display App ---
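# Prerequisites (as used by the code below): a local "Slides/" directory of
# curriculum PDFs, and an ANTHROPIC_API_KEY environment variable when AI
# responses are enabled (fast_mode=False).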

# 1. Preprocess PDFs and build vector DB
class CurriculumChatbot:
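    """RAG-style assistant over curriculum slide PDFs.

    Pipeline: extract per-page text with PyMuPDF, embed each page into a Chroma
    vector store, then (unless fast_mode is enabled) use Claude via LangChain
    chains to pick the most relevant slide and generate an answer.
    """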
    def __init__(self, slides_dir="Slides", fast_mode=False):
        self.pdf_pages = {}  # {filename: {page_num: text}}
        self.pdf_files = {}  # {filename: path}
        self.chunks = []
        self.chunk_metadata = []
        self.vector_db = None
        self.embeddings = None
        self.llm = None
        self.qa_chain = None
        self.content_selection_chain = None  # referenced in chat(); set up in _setup_llm()
        self.focused_qa_chain = None
        self.response_cache = {}  # Simple cache for responses
        self.fast_mode = fast_mode  # Skip LLM for faster responses
        self._process_pdfs(slides_dir)
        self._build_vector_db()
        if not fast_mode:
            self._setup_llm()
        else:
            print("πŸš€ Fast mode enabled - LLM disabled for instant responses")

    def _process_pdfs(self, slides_dir):
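        """Extract text from every page of every PDF in `slides_dir`, keeping one
        chunk (with filename/page metadata) per non-empty page."""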
        slides_path = Path(slides_dir)
        pdf_files = list(slides_path.glob("*.pdf"))
        for pdf_file in pdf_files:
            self.pdf_files[pdf_file.name] = str(pdf_file)
            doc = fitz.open(str(pdf_file))
            pages = {}
            for page_num in range(len(doc)):
                page = doc[page_num]
                text = page.get_text()
                if text.strip():
                    pages[page_num + 1] = text.strip()
            self.pdf_pages[pdf_file.name] = pages
            doc.close()
            # Add each page as a chunk
            for page_num, text in pages.items():
                self.chunks.append(text)
                self.chunk_metadata.append({
                    "filename": pdf_file.name,
                    "page_number": page_num
                })

    def _build_vector_db(self):
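        """Embed the page chunks with all-MiniLM-L6-v2 and store them in a
        Chroma collection persisted to ./chroma_db."""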
        self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        self.vector_db = Chroma.from_texts(
            texts=self.chunks,
            embedding=self.embeddings,
            metadatas=self.chunk_metadata,
            persist_directory="./chroma_db"
        )
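        # Note: calling from_texts with a persist_directory adds these chunks on
        # every startup, so an existing ./chroma_db may end up with duplicate pages.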
    
    def _setup_llm(self):
        """Setup LLM with Claude"""
        try:
            # Initialize LLM attributes
            self.llm = None
            self.qa_chain = None
            self.focused_qa_chain = None
            self.content_selection_chain = None
            
            # Load Claude
            self.anthropic_client = anthropic.Anthropic(
                api_key=os.environ.get("ANTHROPIC_API_KEY")
            )
            
            # Create a custom LLM wrapper that works with LangChain.
            # Note: LLMChain validates its `llm` field, so the wrapper subclasses
            # LangChain's LLM base class rather than being a bare callable.
            from typing import Any, List, Optional
            from langchain_core.language_models.llms import LLM as LangChainLLM

            class ClaudeLLM(LangChainLLM):
                client: Any = None

                @property
                def _llm_type(self) -> str:
                    return "claude"

                def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager=None, **kwargs) -> str:
                    try:
                        response = self.client.messages.create(
                            model="claude-3-5-haiku-20241022",
                            max_tokens=1500,
                            temperature=0.7,
                            messages=[{"role": "user", "content": prompt}]
                        )
                        return response.content[0].text
                    except Exception as e:
                        print(f"Error calling Claude: {e}")
                        return "I'm sorry, I couldn't generate a response at the moment."

            self.llm = ClaudeLLM(client=self.anthropic_client)
            
            # Create content selection prompt template
            content_selection_template = """You are an expert curriculum analyst. Your task is to find the most relevant slide for a student's question.

Student Question: {question}

Available Slide Contents:
{slide_contents}

Instructions:
1. Read each slide content carefully
2. Identify which slide best answers the student's specific question
3. Consider the exact terminology and concepts the student is asking about
4. If the student asks about "for loops", look for slides that specifically mention "for loops"
5. If the student asks about "loops" in general, look for slides that explain loops comprehensively
6. Respond with ONLY the slide number (1, 2, 3, etc.) that is most relevant
7. If no slide is relevant, respond with "0"

Most relevant slide number:"""
            
            self.content_selection_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "slide_contents"],
                template=content_selection_template
            ))
            
            # Create QA prompt template for Claude
            qa_template = """You are an expert programming tutor. Your task is to provide a comprehensive, educational answer based on the curriculum content.

Curriculum Content:
{filled_context}

Student Question: {question}

Instructions:
1. Analyze the curriculum content carefully
2. Provide a detailed, educational explanation
3. Use examples if the content contains them
4. Explain the concept step-by-step
5. Make sure your answer directly addresses what the student is asking
6. If the content is limited, provide additional educational context
7. Structure your answer clearly with bullet points or numbered lists when appropriate

Your detailed answer:"""
            
            self.qa_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "filled_context"],
                template=qa_template
            ))
            
            # Create focused answer prompt template
            focused_qa_template = """You are an expert programming tutor. Your task is to provide a comprehensive, educational answer based on the curriculum slide content.

Slide Content:
{slide_content}

Student Question: {question}

Instructions:
1. Analyze the slide content carefully
2. Provide a detailed, educational explanation
3. Use examples if the slide contains them
4. Explain the concept step-by-step
5. Make sure your answer directly addresses what the student is asking
6. If the slide content is limited, provide additional educational context
7. Structure your answer clearly with bullet points or numbered lists when appropriate

Your detailed answer:"""
            
            self.focused_qa_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "slide_content"],
                template=focused_qa_template
            ))
            
            print("βœ… LLM loaded successfully!")
            print(f"πŸ” LLM object: {self.llm}")
            print(f"πŸ” Content selection chain: {self.content_selection_chain}")
            print(f"πŸ” Focused QA chain: {self.focused_qa_chain}")
        except Exception as e:
            print(f"Warning: Could not load LLM: {e}")
            print("Falling back to basic search mode...")
            self.llm = None
            self.qa_chain = None
            self.focused_qa_chain = None
            self.content_selection_chain = None

    def get_pdf_page_image(self, pdf_path, page_num):
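        """Render a single PDF page (1-based `page_num`) to a PIL RGB image at
        1.5x zoom; returns None if the page is out of range or rendering fails."""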
        try:
            doc = fitz.open(pdf_path)
            if page_num <= len(doc):
                page = doc[page_num - 1]
                mat = fitz.Matrix(1.5, 1.5)
                pix = page.get_pixmap(matrix=mat)
                img_data = pix.tobytes("png")
                img = Image.open(io.BytesIO(img_data))
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                doc.close()
                return img
            doc.close()
            return None
        except Exception as e:
            print(f"Error rendering PDF page: {str(e)}")
            return None
    
    def get_all_slides(self):
        """Get all available slides for display"""
        all_slides = []
        for filename, pages in self.pdf_pages.items():
            for page_num in pages.keys():
                img = self.get_pdf_page_image(self.pdf_files[filename], page_num)
                if img:
                    all_slides.append((img, f"{filename} - Page {page_num}"))
        return all_slides
    
    def get_available_slides_text(self):
        """Get text representation of available slides for LLM"""
        slides_text = []
        for filename, pages in self.pdf_pages.items():
            for page_num in pages.keys():
                slides_text.append(f"{filename} - Page {page_num}")
        return "\n".join(slides_text)

    def chat(self, query):
        """Comprehensive chat function with LLM-powered content selection and answers"""
        # First, try to find relevant curriculum content using vector search
        results = self.vector_db.similarity_search(query, k=5)  # Get top 5 results for LLM analysis
        
        curriculum_relevance_score = 0
        best_slide_content = ""
        best_result = None
        
        if results:
            curriculum_relevance_score = len(results)
            
            # Debug: Print what we found
            print(f"Query: {query}")
            print(f"Found {len(results)} relevant results for LLM analysis:")
            for i, result in enumerate(results):
                print(f"  {i+1}. {result.metadata['filename']} - Page {result.metadata['page_number']}")
                print(f"     Content: {result.page_content[:100]}...")
            
            # Use LLM to select the most relevant content
            if not self.fast_mode and self.content_selection_chain and curriculum_relevance_score > 0:
                try:
                    # Prepare slide contents for LLM analysis
                    slide_contents = []
                    for i, result in enumerate(results):
                        filename = result.metadata['filename']
                        page_num = result.metadata['page_number']
                        content = result.page_content[:800]  # More content for better analysis
                        slide_contents.append(f"Slide {i+1} ({filename} - Page {page_num}):\n{content}")
                    
                    slide_contents_text = "\n\n".join(slide_contents)
                    
                    print(f"πŸ” Using LLM to select most relevant content...")
                    
                    # Get LLM's selection
                    selection_response = self.content_selection_chain.run(
                        question=query, 
                        slide_contents=slide_contents_text
                    )
                    
                    print(f"LLM Selection Response: {selection_response}")
                    
                    # Parse the selection (expecting a number)
                    try:
                        # Extract number from response
                        numbers = re.findall(r'\d+', selection_response)
                        if numbers:
                            selected_index = int(numbers[0]) - 1  # Convert to 0-based index
                            if 0 <= selected_index < len(results):
                                best_result = results[selected_index]
                                best_slide_content = best_result.page_content
                                print(f"βœ… LLM selected slide {selected_index + 1}")
                            else:
                                print(f"⚠️ LLM selection out of range: {selected_index + 1}")
                                # Fallback to first result
                                best_result = results[0]
                                best_slide_content = best_result.page_content
                        else:
                            print("⚠️ No number found in LLM response, using first result")
                            best_result = results[0]
                            best_slide_content = best_result.page_content
                    except Exception as e:
                        print(f"Error parsing LLM selection: {e}")
                        # Fallback to first result
                        best_result = results[0]
                        best_slide_content = best_result.page_content
                        
                except Exception as e:
                    print(f"Error in LLM content selection: {e}")
                    # Fallback to simple selection
                    best_result = results[0]
                    best_slide_content = best_result.page_content
            else:
                # Fallback to simple selection if no LLM
                best_result = results[0]
                best_slide_content = best_result.page_content
        
        # Generate focused LLM answer using the most relevant slide
        if not self.fast_mode and self.focused_qa_chain and curriculum_relevance_score > 0:
            try:
                print(f"πŸ” Calling LLM with question: {query}")
                print(f"πŸ” LLM available: {self.focused_qa_chain is not None}")
                
                answer = self.focused_qa_chain.run(question=query, slide_content=best_slide_content)
                
                print(f"LLM Response: {answer[:200]}...")
                
                # Clean up the answer (Claude is cleaner, but just in case)
                answer = answer.strip()
                
                # Remove any prompt artifacts
                if answer.startswith("Answer:"):
                    answer = answer[7:].strip()
                if answer.startswith("Your detailed answer:"):
                    answer = answer[20:].strip()
                
                # Check if the answer is too short, generic, or poor quality
                if (len(answer.strip()) < 100 or
                    answer.lower().startswith("how does that work") or
                    ("loops" in query.lower() and "loop" not in answer.lower()) or
                    answer.strip() == query.strip()):
                    
                    # Generate a comprehensive educational answer
                    if "loop" in query.lower():
                        if "for loop" in query.lower():
                            answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n**For Loops** are a specific type of loop in programming that allow you to iterate over a sequence (like a range of numbers) a predetermined number of times. They are different from while loops and are particularly useful when you know exactly how many times you want to repeat an action.\n\nKey characteristics of for loops:\n- They use a counter variable\n- They have a defined start, end, and increment\n- They are perfect for iterating through lists, ranges, or any sequence\n- They are more structured than while loops"
                        else:
                            answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n**Loops** are fundamental programming constructs that allow you to repeat code multiple times without having to write the same code repeatedly. They are essential for:\n\n- Processing large amounts of data\n- Repeating actions a specific number of times\n- Iterating through collections like lists and arrays\n- Automating repetitive tasks\n\nThere are different types of loops including for loops, while loops, and do-while loops, each with their own use cases."
                    else:
                        answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide explains the concept clearly. The curriculum content provides the foundation for understanding this programming concept."
                
            except Exception as e:
                print(f"Error generating focused answer: {e}")
                # Fallback to slide content with explanation
                answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide contains the relevant information about your question."
        
        elif not self.fast_mode and self.qa_chain:
            # Fallback to general LLM if focused chain fails
            try:
                if curriculum_relevance_score > 0:

                    context = "\n\n".join([result.page_content for result in results])
                    filled_context = f"Curriculum Context:\n{context}\n\nPlease answer based on this curriculum content."
                else:

                    filled_context = "Note: This question is not covered in the current curriculum. Please provide a general programming answer."
                
                answer = self.qa_chain.run(question=query, filled_context=filled_context)
                answer = answer.strip()
                
                # Remove any prompt artifacts (Claude is cleaner, but just in case)
                if answer.startswith("Answer:"):
                    answer = answer[7:].strip()
                if answer.startswith("Provide a clear, educational answer explaining the concept:"):
                    answer = answer[58:].strip()
                
                # Check if the answer is too short
                if len(answer.strip()) < 50:
                    if curriculum_relevance_score > 0:
                        answer = f"Based on the curriculum content:\n\n{best_slide_content}\n\nThis slide explains the concept clearly."
                    else:
                        answer = "I'm sorry, I couldn't generate a proper answer. Please try rephrasing your question."
                
                # Add warning if not in curriculum
                if curriculum_relevance_score == 0:
                    answer = "πŸ’‘ **Note: This topic isn't covered in your current curriculum, but here's a helpful answer:**\n\n" + answer
                
            except Exception as e:
                print(f"Error generating answer: {e}")

                if curriculum_relevance_score > 0:
                    answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide contains the relevant information about your question."
                else:
                    answer = "I'm sorry, I couldn't generate an answer at the moment. Please try rephrasing your question."
        else:
            # If no LLM available
            if curriculum_relevance_score > 0:
                answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n*Note: AI generation is not available, but here's the relevant curriculum content.*"
            else:
                answer = "I couldn't find relevant content in the curriculum for this question. Please try rephrasing or ask about a different programming topic."
        
        # Get the most relevant slide and its neighboring pages
        relevant_slides = []
        if curriculum_relevance_score > 0 and best_result:
            # Use the LLM-selected result
            filename = best_result.metadata["filename"]
            page_number = best_result.metadata["page_number"]
            
            # Get the specific PDF and its pages
            if filename in self.pdf_files:
                pdf_path = self.pdf_files[filename]
                doc = fitz.open(pdf_path)
                total_pages = len(doc)
                doc.close()
                
                # Use the LLM-selected page as the target
                target_page = page_number
                
                # Get the target page and neighboring pages (2 before, 2 after)
                start_page = max(1, target_page - 2)
                end_page = min(total_pages, target_page + 2)
                
                for page_num in range(start_page, end_page + 1):
                    img = self.get_pdf_page_image(pdf_path, page_num)
                    if img:
                        if page_num == target_page:
                            # Highlight the most relevant page
                            label = f"πŸ“Œ {filename} - Page {page_num} (Most Relevant)"
                        else:
                            label = f"{filename} - Page {page_num}"
                        relevant_slides.append((img, label))
                
                # Recommend the LLM-selected page itself rather than the first page in the window
                recommended_slide, recommended_label = None, None
                for img, label in relevant_slides:
                    if label.endswith("(Most Relevant)"):
                        recommended_slide, recommended_label = img, label
                        break
                if recommended_slide is None and relevant_slides:
                    recommended_slide, recommended_label = relevant_slides[0]
            else:
                # Fallback if filename not found
                recommended_slide = None
                recommended_label = None
        else:
            # If no curriculum content, provide a helpful response
            relevant_slides = []
            recommended_slide = None
            recommended_label = None
        
        return answer, recommended_slide, recommended_label, relevant_slides
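
# Programmatic usage (a minimal sketch; assumes a local "Slides" folder of PDFs
# and, for AI answers, an ANTHROPIC_API_KEY in the environment):
#
#   bot = CurriculumChatbot(slides_dir="Slides", fast_mode=True)
#   answer, slide_img, slide_label, gallery = bot.chat("What are for loops?")
#   print(answer)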

# --- Gradio UI ---
chatbot = CurriculumChatbot(fast_mode=False)  # Enable AI mode by default

def gradio_chat(query, use_ai=True):
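    """Gradio callback: toggle AI mode per the checkbox, run chat(), and return
    the markdown answer plus gallery items for the slide viewer."""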
    # Temporarily switch modes based on user preference
    original_fast_mode = chatbot.fast_mode
    chatbot.fast_mode = not use_ai
    
    try:
        answer, recommended_slide, recommended_label, relevant_slides = chatbot.chat(query)
    finally:
        # Restore original mode
        chatbot.fast_mode = original_fast_mode
    
    # Use the relevant slides (specific PDF with neighboring pages)
    gallery_items = relevant_slides if relevant_slides else []
    
    return answer, gallery_items

with gr.Blocks(title="Inclusive World Curriculum Assistant", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# πŸ€– Inclusive World Curriculum Assistant\nYour AI programming tutor with curriculum-based answers and slide navigation!")
    
    with gr.Row():
        # Left Column - Chatbot Interface
        with gr.Column(scale=1):
            gr.Markdown("### πŸ’¬ Chatbot")
            gr.Markdown("**What questions do you have?**")
            
            # AI Mode Toggle
            with gr.Row():
                use_ai = gr.Checkbox(
                    label="πŸ€– Enable AI Responses", 
                    value=True, 
                    info="Toggle AI-generated answers on/off"
                )
            
            question = gr.Textbox(
                label="Question Input", 
                placeholder="e.g., What are for loops? How do variables work? Explain functions...", 
                lines=3
            )
            submit = gr.Button("πŸ€– Ask AI", variant="primary", size="lg")
            answer = gr.Markdown(label="LLM Generated Output")
        
        # Right Column - Slides Display
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“„ Most Similar Slides")
            gallery = gr.Gallery(
                label="Curriculum Slides", 
                columns=1, 
                rows=3, 
                height="600px", 
                object_fit="contain",
                show_label=False
            )
    
    # Event handlers
    submit.click(fn=gradio_chat, inputs=[question, use_ai], outputs=[answer, gallery])
    question.submit(fn=gradio_chat, inputs=[question, use_ai], outputs=[answer, gallery])
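
# Optionally, demo.launch(share=True) would also expose a temporary public Gradio link.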

if __name__ == "__main__":
    demo.launch()