"""PDF-based question answering: a Gradio app that answers questions about an uploaded PDF using FLAN-T5."""
import gradio as gr
from transformers import pipeline
import PyPDF2

# Load the Hugging Face text2text model (FLAN-T5) once at import time so
# every request reuses the same in-memory model instead of reloading it.
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-large")
def answer_question(pdf_file, question):
    """Answer a question about an uploaded PDF using FLAN-T5.

    Args:
        pdf_file: Path or file-like object for the uploaded PDF, as
            supplied by the Gradio ``File`` component.
        question: Natural-language question about the PDF's content.

    Returns:
        The model's answer as a stripped string, or an explanatory
        message when there is nothing to answer from.
    """
    if pdf_file is None:
        return "Please upload a PDF file."

    # Step 1: extract text from every page. extract_text() may return
    # None/"" for image-only pages, so filter those out; join once
    # instead of repeated string concatenation.
    reader = PyPDF2.PdfReader(pdf_file)
    page_texts = (page.extract_text() for page in reader.pages)
    pdf_text = "\n".join(t.strip() for t in page_texts if t)

    if not pdf_text:
        return "No extractable text was found in this PDF."

    # Step 2: build the prompt.
    # NOTE(review): FLAN-T5 truncates input around 512 tokens, so long
    # PDFs are silently cut off — consider chunking/retrieval for big files.
    prompt = f"Context: {pdf_text}\nQuestion: {question}"

    # Step 3: run the model. do_sample=False (greedy decoding) makes the
    # answer deterministic — sampling only adds noise for extractive QA.
    result = qa_pipeline(prompt, max_length=512, do_sample=False)[0]['generated_text']
    return result.strip()
# Gradio interface: file upload + question box in, plain text answer out.
iface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.File(label="Upload a PDF file"),
        gr.Textbox(label="Ask your question"),
    ],
    outputs="text",
    title="PDF-Based Question Answering",
    description="Upload a PDF and ask questions about its content using FLAN-T5.",
)
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()