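"""Curriculum chatbot Space: answers student questions from PDF slide decks.

Pipeline: PyMuPDF extracts per-page text -> Chroma vector search finds candidate
slides -> Claude selects the best slide and writes a tutor-style answer -> Gradio
shows the answer next to the matching slide images.
"""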
import io
import os
import re
from pathlib import Path
from typing import Any, List, Optional

import anthropic
import fitz  # PyMuPDF
import gradio as gr
from PIL import Image
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.llms.base import LLM
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
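# Assumed dependencies (not pinned in this file): gradio, pymupdf, langchain,
# langchain-community, chromadb, sentence-transformers, anthropic, pillow.
# The ANTHROPIC_API_KEY environment variable must be set for AI answers.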
# --- Minimal PDF Search & Display App ---

# 1. Preprocess PDFs and build vector DB
class CurriculumChatbot:
    def __init__(self, slides_dir="Slides", fast_mode=False):
        self.pdf_pages = {}  # {filename: {page_num: text}}
        self.pdf_files = {}  # {filename: path}
        self.chunks = []
        self.chunk_metadata = []
        self.vector_db = None
        self.embeddings = None
        self.llm = None
        self.qa_chain = None
        self.content_selection_chain = None  # chat() reads this attribute, so it must exist even in fast mode
        self.focused_qa_chain = None
        self.response_cache = {}  # Simple cache for responses
        self.fast_mode = fast_mode  # Skip LLM for faster responses
        self._process_pdfs(slides_dir)
        self._build_vector_db()
        if not fast_mode:
            self._setup_llm()
        else:
            print("🚀 Fast mode enabled - LLM disabled for instant responses")
    def _process_pdfs(self, slides_dir):
        """Extract text from every page of every PDF in slides_dir."""
        slides_path = Path(slides_dir)
        pdf_files = list(slides_path.glob("*.pdf"))
        for pdf_file in pdf_files:
            self.pdf_files[pdf_file.name] = str(pdf_file)
            doc = fitz.open(str(pdf_file))
            pages = {}
            for page_num in range(len(doc)):
                page = doc[page_num]
                text = page.get_text()
                if text.strip():
                    pages[page_num + 1] = text.strip()  # 1-based page numbers
            self.pdf_pages[pdf_file.name] = pages
            doc.close()
            # Add each page as a chunk
            for page_num, text in pages.items():
                self.chunks.append(text)
                self.chunk_metadata.append({
                    "filename": pdf_file.name,
                    "page_number": page_num
                })
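    # Note: pages are indexed whole (one chunk per slide) rather than run through a
    # text splitter; slide pages are short enough that page-level granularity maps
    # cleanly back to a displayable image.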
    def _build_vector_db(self):
        """Embed each page chunk and index it in a persistent Chroma store."""
        self.embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        self.vector_db = Chroma.from_texts(
            texts=self.chunks,
            embedding=self.embeddings,
            metadatas=self.chunk_metadata,
            persist_directory="./chroma_db"
        )
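    # Note: Chroma persists to ./chroma_db, but from_texts re-embeds every page on
    # each startup; loading the existing collection instead would be a possible
    # startup-time optimization.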
    def _setup_llm(self):
        """Setup LLM with Claude"""
        try:
            # Initialize LLM attributes
            self.llm = None
            self.qa_chain = None
            self.focused_qa_chain = None
            self.content_selection_chain = None
            # Load Claude
            self.anthropic_client = anthropic.Anthropic(
                api_key=os.environ.get("ANTHROPIC_API_KEY")
            )

            # Custom LLM wrapper; subclassing LangChain's LLM base class (rather
            # than passing a bare callable) is needed for LLMChain's validation
            class ClaudeLLM(LLM):
                client: Any = None  # anthropic.Anthropic instance

                @property
                def _llm_type(self) -> str:
                    return "claude"

                def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
                    try:
                        response = self.client.messages.create(
                            model="claude-3-5-haiku-20241022",
                            max_tokens=1500,
                            temperature=0.7,
                            messages=[{"role": "user", "content": prompt}]
                        )
                        return response.content[0].text
                    except Exception as e:
                        print(f"Error calling Claude: {e}")
                        return "I'm sorry, I couldn't generate a response at the moment."

            self.llm = ClaudeLLM(client=self.anthropic_client)
            # Create content selection prompt template
            content_selection_template = """You are an expert curriculum analyst. Your task is to find the most relevant slide for a student's question.

Student Question: {question}

Available Slide Contents:
{slide_contents}

Instructions:
1. Read each slide content carefully
2. Identify which slide best answers the student's specific question
3. Consider the exact terminology and concepts the student is asking about
4. If the student asks about "for loops", look for slides that specifically mention "for loops"
5. If the student asks about "loops" in general, look for slides that explain loops comprehensively
6. Respond with ONLY the slide number (1, 2, 3, etc.) that is most relevant
7. If no slide is relevant, respond with "0"

Most relevant slide number:"""
            self.content_selection_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "slide_contents"],
                template=content_selection_template
            ))
            # Create QA prompt template for Claude
            qa_template = """You are an expert programming tutor. Your task is to provide a comprehensive, educational answer based on the curriculum content.

Curriculum Content:
{filled_context}

Student Question: {question}

Instructions:
1. Analyze the curriculum content carefully
2. Provide a detailed, educational explanation
3. Use examples if the content contains them
4. Explain the concept step-by-step
5. Make sure your answer directly addresses what the student is asking
6. If the content is limited, provide additional educational context
7. Structure your answer clearly with bullet points or numbered lists when appropriate

Your detailed answer:"""
            self.qa_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "filled_context"],
                template=qa_template
            ))
            # Create focused answer prompt template
            focused_qa_template = """You are an expert programming tutor. Your task is to provide a comprehensive, educational answer based on the curriculum slide content.

Slide Content:
{slide_content}

Student Question: {question}

Instructions:
1. Analyze the slide content carefully
2. Provide a detailed, educational explanation
3. Use examples if the slide contains them
4. Explain the concept step-by-step
5. Make sure your answer directly addresses what the student is asking
6. If the slide content is limited, provide additional educational context
7. Structure your answer clearly with bullet points or numbered lists when appropriate

Your detailed answer:"""
            self.focused_qa_chain = LLMChain(llm=self.llm, prompt=PromptTemplate(
                input_variables=["question", "slide_content"],
                template=focused_qa_template
            ))
| print("β LLM loaded successfully!") | |
| print(f"π LLM object: {self.llm}") | |
| print(f"π Content selection chain: {self.content_selection_chain}") | |
| print(f"π Focused QA chain: {self.focused_qa_chain}") | |
| except Exception as e: | |
| print(f"Warning: Could not load LLM: {e}") | |
| print("Falling back to basic search mode...") | |
| self.llm = None | |
| self.qa_chain = None | |
| self.focused_qa_chain = None | |
| self.content_selection_chain = None | |
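    # Quick smoke test for the wrapper (hypothetical usage; assumes a valid
    # ANTHROPIC_API_KEY in the environment and classic LangChain's callable LLMs):
    #   bot = CurriculumChatbot(slides_dir="Slides")
    #   print(bot.llm("Reply with the single word: ready"))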
    def get_pdf_page_image(self, pdf_path, page_num):
        """Render a single PDF page (1-based) to a PIL image, or None on failure."""
        try:
            doc = fitz.open(pdf_path)
            if 1 <= page_num <= len(doc):
                page = doc[page_num - 1]
                mat = fitz.Matrix(1.5, 1.5)  # 1.5x zoom for readable slide images
                pix = page.get_pixmap(matrix=mat)
                img_data = pix.tobytes("png")
                img = Image.open(io.BytesIO(img_data))
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                doc.close()
                return img
            doc.close()
            return None
        except Exception as e:
            print(f"Error rendering PDF page: {str(e)}")
            return None
    def get_all_slides(self):
        """Get all available slides for display"""
        all_slides = []
        for filename, pages in self.pdf_pages.items():
            for page_num in pages.keys():
                img = self.get_pdf_page_image(self.pdf_files[filename], page_num)
                if img:
                    all_slides.append((img, f"{filename} - Page {page_num}"))
        return all_slides

    def get_available_slides_text(self):
        """Get text representation of available slides for LLM"""
        slides_text = []
        for filename, pages in self.pdf_pages.items():
            for page_num in pages.keys():
                slides_text.append(f"{filename} - Page {page_num}")
        return "\n".join(slides_text)
    def chat(self, query):
        """Comprehensive chat function with LLM-powered content selection and answers"""
        # First, try to find relevant curriculum content using vector search
        results = self.vector_db.similarity_search(query, k=5)  # Get top 5 results for LLM analysis
        curriculum_relevance_score = 0
        best_slide_content = ""
        best_result = None
        if results:
            curriculum_relevance_score = len(results)
            # Debug: Print what we found
            print(f"Query: {query}")
            print(f"Found {len(results)} relevant results for LLM analysis:")
            for i, result in enumerate(results):
                print(f"  {i+1}. {result.metadata['filename']} - Page {result.metadata['page_number']}")
                print(f"     Content: {result.page_content[:100]}...")
        # Use LLM to select the most relevant content; honor the fast-mode toggle
        use_llm = not self.fast_mode
        if use_llm and self.content_selection_chain and curriculum_relevance_score > 0:
            try:
                # Prepare slide contents for LLM analysis
                slide_contents = []
                for i, result in enumerate(results):
                    filename = result.metadata['filename']
                    page_num = result.metadata['page_number']
                    content = result.page_content[:800]  # More content for better analysis
                    slide_contents.append(f"Slide {i+1} ({filename} - Page {page_num}):\n{content}")
                slide_contents_text = "\n\n".join(slide_contents)
                print("🔍 Using LLM to select most relevant content...")
                # Get LLM's selection
                selection_response = self.content_selection_chain.run(
                    question=query,
                    slide_contents=slide_contents_text
                )
                print(f"LLM Selection Response: {selection_response}")
                # Parse the selection (expecting a number; re is imported at module level)
                try:
                    numbers = re.findall(r'\d+', selection_response)
                    if numbers:
                        selected_index = int(numbers[0]) - 1  # Convert to 0-based index
                        if 0 <= selected_index < len(results):
                            best_result = results[selected_index]
                            best_slide_content = best_result.page_content
                            print(f"✅ LLM selected slide {selected_index + 1}")
                        else:
                            print(f"⚠️ LLM selection out of range: {selected_index + 1}")
                            # Fallback to first result
                            best_result = results[0]
                            best_slide_content = best_result.page_content
                    else:
                        print("⚠️ No number found in LLM response, using first result")
                        best_result = results[0]
                        best_slide_content = best_result.page_content
                except Exception as e:
                    print(f"Error parsing LLM selection: {e}")
                    # Fallback to first result
                    best_result = results[0]
                    best_slide_content = best_result.page_content
            except Exception as e:
                print(f"Error in LLM content selection: {e}")
                # Fallback to simple selection
                best_result = results[0]
                best_slide_content = best_result.page_content
        elif results:
            # Fallback to simple selection if no LLM (guarding on results avoids an
            # IndexError when the search comes back empty)
            best_result = results[0]
            best_slide_content = best_result.page_content
        # Generate focused LLM answer using the most relevant slide
        if use_llm and self.focused_qa_chain and curriculum_relevance_score > 0:
            try:
                print(f"🔍 Calling LLM with question: {query}")
                print(f"🔍 LLM available: {self.focused_qa_chain is not None}")
                answer = self.focused_qa_chain.run(question=query, slide_content=best_slide_content)
                print(f"LLM Response: {answer[:200]}...")
                # Clean up the answer (Claude is cleaner, but just in case)
                answer = answer.strip()
                # Remove any prompt artifacts; slice by prefix length so no stray
                # characters are left behind (the old hardcoded offsets were off by one)
                for prefix in ("Answer:", "Your detailed answer:"):
                    if answer.startswith(prefix):
                        answer = answer[len(prefix):].strip()
                # Check if the answer is too short, generic, or poor quality
                if (len(answer.strip()) < 100 or
                        answer.lower().startswith("how does that work") or
                        ("loops" in query.lower() and "loop" not in answer.lower()) or
                        answer.strip() == query.strip()):
                    # Generate a comprehensive educational answer
                    if "loop" in query.lower():
                        if "for loop" in query.lower():
                            answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n**For Loops** are a specific type of loop in programming that allow you to iterate over a sequence (like a range of numbers) a predetermined number of times. They are different from while loops and are particularly useful when you know exactly how many times you want to repeat an action.\n\nKey characteristics of for loops:\n- They use a counter variable\n- They have a defined start, end, and increment\n- They are perfect for iterating through lists, ranges, or any sequence\n- They are more structured than while loops"
                        else:
                            answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n**Loops** are fundamental programming constructs that allow you to repeat code multiple times without having to write the same code repeatedly. They are essential for:\n\n- Processing large amounts of data\n- Repeating actions a specific number of times\n- Iterating through collections like lists and arrays\n- Automating repetitive tasks\n\nThere are different types of loops including for loops, while loops, and do-while loops, each with their own use cases."
                    else:
                        answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide explains the concept clearly. The curriculum content provides the foundation for understanding this programming concept."
            except Exception as e:
                print(f"Error generating focused answer: {e}")
                # Fallback to slide content with explanation
                answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide contains the relevant information about your question."
        elif use_llm and self.qa_chain:
            # Fallback to general LLM if focused chain fails
            try:
                if curriculum_relevance_score > 0:
                    context = "\n\n".join([result.page_content for result in results])
                    filled_context = f"Curriculum Context:\n{context}\n\nPlease answer based on this curriculum content."
                else:
                    filled_context = "Note: This question is not covered in the current curriculum. Please provide a general programming answer."
                answer = self.qa_chain.run(question=query, filled_context=filled_context)
                answer = answer.strip()
                # Remove any prompt artifacts (Claude is cleaner, but just in case)
                for prefix in ("Answer:", "Provide a clear, educational answer explaining the concept:"):
                    if answer.startswith(prefix):
                        answer = answer[len(prefix):].strip()
                # Check if the answer is too short
                if len(answer.strip()) < 50:
                    if curriculum_relevance_score > 0:
                        answer = f"Based on the curriculum content:\n\n{best_slide_content}\n\nThis slide explains the concept clearly."
                    else:
                        answer = "I'm sorry, I couldn't generate a proper answer. Please try rephrasing your question."
                # Add warning if not in curriculum
                if curriculum_relevance_score == 0:
                    answer = "💡 **Note: This topic isn't covered in your current curriculum, but here's a helpful answer:**\n\n" + answer
            except Exception as e:
                print(f"Error generating answer: {e}")
                if curriculum_relevance_score > 0:
                    answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\nThis slide contains the relevant information about your question."
                else:
                    answer = "I'm sorry, I couldn't generate an answer at the moment. Please try rephrasing your question."
        else:
            # If no LLM available (or fast mode is on)
            if curriculum_relevance_score > 0:
                answer = f"Based on the curriculum slide:\n\n{best_slide_content}\n\n*Note: AI generation is not available, but here's the relevant curriculum content.*"
            else:
                answer = "I couldn't find relevant content in the curriculum for this question. Please try rephrasing or ask about a different programming topic."
        # Get the most relevant slide and its neighboring pages
        relevant_slides = []
        if curriculum_relevance_score > 0 and best_result:
            # Use the LLM-selected result
            filename = best_result.metadata["filename"]
            page_number = best_result.metadata["page_number"]
            # Get the specific PDF and its pages
            if filename in self.pdf_files:
                pdf_path = self.pdf_files[filename]
                doc = fitz.open(pdf_path)
                total_pages = len(doc)
                doc.close()
                # Use the LLM-selected page as the target
                target_page = page_number
                # Get the target page and neighboring pages (2 before, 2 after)
                start_page = max(1, target_page - 2)
                end_page = min(total_pages, target_page + 2)
                for page_num in range(start_page, end_page + 1):
                    img = self.get_pdf_page_image(pdf_path, page_num)
                    if img:
                        if page_num == target_page:
                            # Highlight the most relevant page
                            label = f"🎯 {filename} - Page {page_num} (Most Relevant)"
                        else:
                            label = f"{filename} - Page {page_num}"
                        relevant_slides.append((img, label))
                recommended_slide = relevant_slides[0][0] if relevant_slides else None
                recommended_label = relevant_slides[0][1] if relevant_slides else None
            else:
                # Fallback if filename not found
                recommended_slide = None
                recommended_label = None
        else:
            # If no curriculum content, provide a helpful response
            relevant_slides = []
            recommended_slide = None
            recommended_label = None
        return answer, recommended_slide, recommended_label, relevant_slides
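    # Example call (hypothetical values):
    #   answer, slide_img, slide_label, slides = bot.chat("What are for loops?")
    #   -> answer: markdown string; slide_img/slide_label: the highlighted page;
    #      slides: up to 5 (image, label) pairs around the best-matching page.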
# --- Gradio UI ---
chatbot = CurriculumChatbot(fast_mode=False)  # Enable AI mode by default

def gradio_chat(query, use_ai=True):
    # Temporarily switch modes based on user preference;
    # chat() checks self.fast_mode, so the toggle takes effect per request
    original_fast_mode = chatbot.fast_mode
    chatbot.fast_mode = not use_ai
    try:
        answer, recommended_slide, recommended_label, relevant_slides = chatbot.chat(query)
    finally:
        # Restore original mode
        chatbot.fast_mode = original_fast_mode
    # Use the relevant slides (specific PDF with neighboring pages)
    gallery_items = relevant_slides if relevant_slides else []
    return answer, gallery_items
with gr.Blocks(title="Inclusive World Curriculum Assistant", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Inclusive World Curriculum Assistant\nYour AI programming tutor with curriculum-based answers and slide navigation!")
    with gr.Row():
        # Left Column - Chatbot Interface
        with gr.Column(scale=1):
            gr.Markdown("### 💬 Chatbot")
            gr.Markdown("**What questions do you have?**")
            # AI Mode Toggle
            with gr.Row():
                use_ai = gr.Checkbox(
                    label="🤖 Enable AI Responses",
                    value=True,
                    info="Toggle AI-generated answers on/off"
                )
            question = gr.Textbox(
                label="Question Input",
                placeholder="e.g., What are for loops? How do variables work? Explain functions...",
                lines=3
            )
            submit = gr.Button("🤖 Ask AI", variant="primary", size="lg")
            answer = gr.Markdown(label="LLM Generated Output")
        # Right Column - Slides Display
        with gr.Column(scale=1):
            gr.Markdown("### 📚 Most Similar Slides")
            gallery = gr.Gallery(
                label="Curriculum Slides",
                columns=1,
                rows=3,
                height="600px",
                object_fit="contain",
                show_label=False
            )
    # Event handlers
    submit.click(fn=gradio_chat, inputs=[question, use_ai], outputs=[answer, gallery])
    question.submit(fn=gradio_chat, inputs=[question, use_ai], outputs=[answer, gallery])

if __name__ == "__main__":
    demo.launch()
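# If several users hit the Space at once, calling demo.queue() before launch()
# serializes requests (assumption: standard Gradio queue API; not used above).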