import gradio as gr
import os
import re
import urllib.request

import faiss
import fitz  # PyMuPDF (takes PDF)
import numpy as np
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
# Function to download PDF from a user-inputted URL
# Uses the urllib.request.urlretrieve function to fetch the PDF from the url and
# save it as a file at output_path.
# def download_pdf(url, output_path):
# urllib.request.urlretrieve(url, output_path)
# Preprocessing function to clean text
# Cleans up the extracted text by removing newlines and extra spaces.
def preprocess(text):
    """Normalize whitespace in text extracted from a PDF page.

    Replaces newlines with spaces, then collapses any run of whitespace
    into a single space so downstream chunking can split on ' '.
    """
    text = text.replace('\n', ' ')
    # Raw string fixes the invalid escape sequence '\s' (SyntaxWarning on
    # Python 3.12+, slated to become an error in a future release).
    text = re.sub(r'\s+', ' ', text)
    return text
# Convert PDF document to text
# Opens the PDF using fitz.open (PyMuPDF).
# Iterates through the specified page range, extracting text from each page.
# Applies the preprocess function to clean up each page's text.
# Collects and returns a list of the cleaned text strings, one for each page.
def pdf_to_text(path, start_page=1, end_page=None):
    """Extract cleaned text from a page range of a PDF.

    Opens the document with PyMuPDF, pulls the plain text of each page in
    [start_page, end_page] (1-based, inclusive; end_page defaults to the
    last page), runs it through preprocess(), and returns one string per
    page.
    """
    document = fitz.open(path)
    total = document.page_count
    # Clamp the requested end to the document's actual length.
    last_page = total if end_page is None or end_page > total else end_page
    pages = []
    for page_index in tqdm(range(start_page - 1, last_page), desc="Extracting text from PDF"):
        raw_text = document.load_page(page_index).get_text("text")
        pages.append(preprocess(raw_text))
    document.close()
    return pages
# Convert list of texts to smaller chunks
# Iterates through the list of preprocessed text strings (texts).
# For each text string, splits it into words and then groups into chunks
# If the end of a chunk falls short of the word_length and it's not the last
# chunk, the remaining words are prepended to the next text string to avoid
# having short ending chunk.
# Each chunk is prefixed with its page number and enclosed in quotes.
def text_to_chunks(texts, word_length=150, start_page=1):
    """Split a list of page texts into ~word_length-word chunks.

    Words accumulate in a buffer that persists across pages, so a short
    tail of one page is merged into the next page's first chunk. Each
    chunk is prefixed with the page number on which it was completed and
    wrapped in double quotes, e.g. 'Page 3: "..."'.

    BUG FIX: the original flushed the buffer inside the page loop only when
    it had already reached word_length — a condition the inner word loop
    made impossible — and never flushed after the last page, silently
    dropping the final (short) chunk of the document. The trailing words
    are now emitted as a final chunk attributed to the last page.
    """
    chunks = []
    buffer = []
    page_num = start_page  # page label for the chunk currently being built
    for idx, text in enumerate(texts):
        page_num = idx + start_page
        for word in text.split(' '):
            buffer.append(word)
            if len(buffer) >= word_length:
                chunk = ' '.join(buffer).strip()
                chunks.append(f'Page {page_num}: "{chunk}"')
                buffer = []
    # Flush any leftover words so the end of the document is not lost.
    if buffer:
        chunk = ' '.join(buffer).strip()
        chunks.append(f'Page {page_num}: "{chunk}"')
    return chunks
# texts = pdf_to_text(pdf_path, start_page=1)
# chunks = text_to_chunks(texts, word_length=150)
# Chunk Embedding:
# Sentence-embedding model shared by chunk indexing and query encoding;
# loaded once at module import so Gradio callbacks reuse the same instance.
model = SentenceTransformer('all-MiniLM-L6-v2')
# Assuming `chunks` is your list of preprocessed text chunks
# embeddings = model.encode(chunks, show_progress_bar=True)
# dimension = embeddings.shape[1] # Dimension of embeddings
# index = faiss.IndexFlatL2(dimension) # L2 distance for similarity
# index.add(embeddings.astype(np.float32)) # Add embeddings to index
# Querying the Index for Relevant Chunks
# create function to query the index with a user's question
# find the most relevant chunks, and display them:
def search(query, k=5, index=None, chunks=None):
    """Return the k chunks most similar to *query* from a FAISS index.

    Args:
        query: natural-language question to embed and search for.
        k: number of nearest chunks to return.
        index: FAISS index over chunk embeddings. Falls back to a
            module-level ``index`` if one exists (the original relied on
            that global exclusively).
        chunks: list of chunk strings aligned with the index rows; same
            module-level fallback as ``index``.

    Raises:
        ValueError: if no index/chunks are supplied and none exist at
            module level. The original code unconditionally read globals
            that are never defined (their setup is commented out above),
            so every call died with an opaque NameError.
    """
    if index is None:
        index = globals().get("index")
    if chunks is None:
        chunks = globals().get("chunks")
    if index is None or chunks is None:
        raise ValueError(
            "search() needs a FAISS index and its chunk list; pass them as "
            "arguments or build module-level `index`/`chunks` first."
        )
    query_embedding = model.encode([query])[0].astype(np.float32)
    distances, indices = index.search(np.array([query_embedding]), k)
    return [chunks[idx] for idx in indices[0]]
# GPT Integration:
# SECURITY FIX: the API key was hard-coded in source (and is therefore
# leaked and should be revoked). Read it from the environment instead;
# set OPENAI_API_KEY before launching the app.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Takes the semantically searched chunks as input and generates a response using
# the ChatGPT API
def generate_response_from_chunks(chunks, user_query, max_tokens=250):
    """Ask GPT-4 to answer *user_query* using only the retrieved chunks.

    Builds a prompt containing the numbered search results, citation
    instructions, and the query, sends it via the chat/completions
    endpoint, and returns the model's reply text (stripped).
    """
    # Numbered search results, one per retrieved chunk.
    numbered_results = [
        f"{number}. {chunk}\n\n"
        for number, chunk in enumerate(chunks, start=1)
    ]
    instructions = (
        "Instructions: Compose a comprehensive reply to the query using the search results given. "
        "Cite each reference using [number] notation (every result has a number at the beginning). "
        "Citation should be done at the end of each sentence. If the search results mention multiple subjects "
        "with the same name, create separate answers for each. Only include information found in the results and "
        "don't add any additional information. Make sure the answer is correct and don't output false content. "
        "If the text does not relate to the query, simply state 'Found Nothing'. Don't write 'Answer:' "
        "Directly start the answer.\n"
    )
    prompt = (
        "search results:\n\n"
        + "".join(numbered_results)
        + instructions
        + f"Query: {user_query}\n\n"
    )
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": "Please provide a response based on the above instructions."},
        ],
        temperature=0.7,
        max_tokens=max_tokens,
        top_p=1.0,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    # Only the text of the first choice is returned.
    return completion.choices[0].message.content.strip()
# relevant_chunks = search(user_query) # Assume `search` is a function that retrieves relevant academic text chunks
# # Generate a cohesive response using the improved ChatGPT prompt structure
# response = generate_response_from_chunks(relevant_chunks, user_query)
# Function to download PDF from a user-inputted URL
# def download_pdf(url, output_path):
# urllib.request.urlretrieve(url, output_path)
def process_pdf(pdf_path, user_query):
    """End-to-end pipeline: extract a PDF, retrieve relevant chunks, answer.

    Extracts and chunks the PDF text, embeds the chunks, builds a FAISS
    index for this document, retrieves the chunks most similar to the
    query, and returns a GPT-generated answer grounded in them.

    BUG FIX: the original called search(), which reads module-level
    `index` and `chunks` globals that are never defined (their setup is
    commented out), so every request raised NameError. The index is now
    built here from this PDF's own chunks, making the pipeline
    self-contained.
    """
    texts = pdf_to_text(pdf_path, start_page=1)
    doc_chunks = text_to_chunks(texts, word_length=150)
    # Embed the chunks and index them with L2 distance.
    embeddings = model.encode(doc_chunks, show_progress_bar=True).astype(np.float32)
    doc_index = faiss.IndexFlatL2(embeddings.shape[1])
    doc_index.add(embeddings)
    # Retrieve up to 5 nearest chunks for the query.
    query_vec = model.encode([user_query]).astype(np.float32)
    _, hits = doc_index.search(query_vec, min(5, len(doc_chunks)))
    relevant_chunks = [doc_chunks[i] for i in hits[0]]
    return generate_response_from_chunks(relevant_chunks, user_query)
# --- Gradio UI ---
# Layout is defined by statement order inside the Blocks context: header,
# description, a notice, then a row with the question/submit controls on
# one side and the answer box on the other.
title = 'BookGPT'
description = "BookGPT allows you to input an entire book and ask questions about its contents. This app uses GPT-3 to generate answers based on the book's information. BookGPT has ability to add reference to the specific page number from where the information was found. This adds credibility to the answers generated also helps you locate the relevant information in the book."
with gr.Blocks() as demo:
    gr.Markdown(f'<center><h1>{title}</h1></center>')
    gr.Markdown(description)
    gr.Markdown("Thank you for all the support this space has received! Unfortunately, my OpenAI $18 grant has been exhausted, so you'll need to enter your own OpenAI API Key to use the app. Sorry for inconvenience :-(.")
    with gr.Row():
        with gr.Group():
            # NOTE(review): `file` is a plain string, not a Gradio component,
            # yet it is passed in `inputs=` below — this likely fails at
            # runtime; a gr.File (or gr.Textbox holding the path) seems
            # intended. Confirm against the installed Gradio version.
            file = 'GANPaper.pdf'
            question = gr.Textbox(label='question')
            btn = gr.Button(value='Submit')
        with gr.Group():
            answer = gr.Textbox(label='answer')
    # Wire the button: process_pdf(pdf_path, user_query) -> answer text.
    btn.click(process_pdf, inputs=[file, question], outputs=[answer])
demo.launch()