"""BookGPT: ask questions about a PDF book via semantic search + GPT."""

import re
import urllib.request

import fitz  # PyMuPDF, used for PDF text extraction
import gradio as gr
import faiss
import numpy as np
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from tqdm import tqdm


def download_pdf(url, output_path):
    """Download a PDF from *url* and save it at *output_path*."""
    urllib.request.urlretrieve(url, output_path)


def preprocess(text):
    """Collapse newlines and runs of whitespace into single spaces.

    Args:
        text: Raw page text extracted from the PDF.

    Returns:
        The cleaned text string.
    """
    text = text.replace('\n', ' ')
    # BUG FIX: '\s' in a non-raw string is an invalid escape sequence
    # (DeprecationWarning, a SyntaxWarning on newer Pythons) — use r'...'.
    text = re.sub(r'\s+', ' ', text)
    return text


def pdf_to_text(path, start_page=1, end_page=None):
    """Extract cleaned text from a PDF, one string per page.

    Args:
        path: Filesystem path to the PDF document.
        start_page: First page to extract (1-based, inclusive).
        end_page: Last page to extract (inclusive); None means the last
            page of the document. Values past the end are clamped.

    Returns:
        List of preprocessed page texts, one entry per extracted page.
    """
    doc = fitz.open(path)
    try:
        total_pages = doc.page_count
        if end_page is None or end_page > total_pages:
            end_page = total_pages
        text_list = []
        for i in tqdm(range(start_page - 1, end_page),
                      desc="Extracting text from PDF"):
            page_text = doc.load_page(i).get_text("text")
            text_list.append(preprocess(page_text))
    finally:
        # BUG FIX: close the document even if extraction raises,
        # so the underlying file handle is always released.
        doc.close()
    return text_list
def text_to_chunks(texts, word_length=150, start_page=1):
    """Split page texts into ~word_length-word chunks labeled with page numbers.

    Words left over at a page boundary are carried into the next page's
    chunk so that chunks do not end up much shorter than *word_length*.

    Args:
        texts: List of preprocessed page texts (from pdf_to_text).
        word_length: Target number of words per chunk.
        start_page: Page number corresponding to texts[0], used in labels.

    Returns:
        List of strings of the form 'Page N: "..."'.
    """
    chunks = []
    buffer = []
    last_page = start_page
    for idx, text in enumerate(texts):
        last_page = idx + start_page
        for word in text.split(' '):
            buffer.append(word)
            if len(buffer) >= word_length:
                chunk = ' '.join(buffer).strip()
                chunks.append(f'Page {idx + start_page}: "{chunk}"')
                buffer = []
    # BUG FIX: the original only flushed the leftover buffer when it was
    # *at least* word_length words (which can never happen — the inner loop
    # already flushes at that size), silently discarding the tail of the
    # document. Flush whatever remains as a final, possibly short, chunk.
    if buffer:
        chunk = ' '.join(buffer).strip()
        if chunk:
            chunks.append(f'Page {last_page}: "{chunk}"')
    return chunks


# Sentence-embedding model used for both chunk indexing and query encoding.
model = SentenceTransformer('all-MiniLM-L6-v2')

# Populated by process_pdf(); read by search().
index = None   # faiss IndexFlatL2 over the chunk embeddings
chunks = []    # text chunks aligned row-for-row with the index


def search(query, k=5):
    """Return the k chunks most similar to *query* (L2 distance).

    Requires process_pdf() to have built the module-level index first.
    """
    query_embedding = model.encode([query])[0].astype(np.float32)
    distances, indices = index.search(np.array([query_embedding]), k)
    return [chunks[i] for i in indices[0]]


import os  # local import so this fix is self-contained

# SECURITY FIX: the original hard-coded an OpenAI API key in source.
# Never commit secrets — read the key from the environment instead
# (and revoke the leaked key).
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


def generate_response_from_chunks(chunks, user_query, max_tokens=250):
    """Generate an answer to *user_query* grounded in the given chunks.

    Args:
        chunks: Numbered, page-labeled text chunks from semantic search.
        user_query: The user's question.
        max_tokens: Cap on the length of the generated completion.

    Returns:
        The model's answer text, stripped of surrounding whitespace.
    """
    # Structured prompt: numbered search results, then citation instructions.
    prompt = "search results:\n\n"
    for i, chunk in enumerate(chunks, start=1):
        prompt += f"{i}. {chunk}\n\n"
    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. " \
              "Cite each reference using [number] notation (every result has a number at the beginning). " \
              "Citation should be done at the end of each sentence. If the search results mention multiple subjects " \
              "with the same name, create separate answers for each. Only include information found in the results and " \
              "don't add any additional information. Make sure the answer is correct and don't output false content. " \
              "If the text does not relate to the query, simply state 'Found Nothing'. Don't write 'Answer:' " \
              "Directly start the answer.\n"
    prompt += f"Query: {user_query}\n\n"

    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": "Please provide a response based on the above instructions."},
        ],
        temperature=0.7,
        max_tokens=max_tokens,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    return response.choices[0].message.content.strip()


def process_pdf(pdf_path, user_query):
    """End-to-end pipeline: extract text, chunk, index, and answer the query.

    Args:
        pdf_path: Filesystem path of the uploaded PDF.
        user_query: The user's question about the book.

    Returns:
        The generated answer string.
    """
    global index, chunks
    texts = pdf_to_text(pdf_path, start_page=1)
    chunks = text_to_chunks(texts, word_length=150)
    # BUG FIX: the original never embedded or indexed the chunks (that code
    # was commented out), so search() raised NameError on the undefined
    # globals. Build the faiss index here before querying it.
    embeddings = model.encode(chunks, show_progress_bar=True)
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(np.asarray(embeddings, dtype=np.float32))
    relevant_chunks = search(user_query)
    return generate_response_from_chunks(relevant_chunks, user_query)


title = 'BookGPT'
description = ("BookGPT allows you to input an entire book and ask questions about its contents. "
               "This app uses GPT-3 to generate answers based on the book's information. "
               "BookGPT has ability to add reference to the specific page number from where the "
               "information was found. This adds credibility to the answers generated also helps "
               "you locate the relevant information in the book.")
BookGPT has ability to add reference to the specific page number from where the information was found. This adds credibility to the answers generated also helps you locate the relevant information in the book." with gr.Blocks() as demo: gr.Markdown(f'