# resumeGPT / app.py
# (Hugging Face Space page residue converted to comments: "arian81's picture",
#  commit "bug fix", 5580109 unverified)
import asyncio
import aiohttp
import fitz
import re
import numpy as np
import tensorflow_hub as hub
import openai
import gradio as gr
import os
from sklearn.neighbors import NearestNeighbors
async def download_pdf(url):
    """Download the PDF at *url* and save it to ./document.pdf.

    Args:
        url: direct link to a PDF file.

    Raises:
        aiohttp.ClientResponseError: if the server answers with an HTTP
            error status (previously an error page was silently written
            to disk and later fed to the PDF parser).
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            # Fail loudly on 4xx/5xx instead of saving the error body.
            response.raise_for_status()
            content = await response.read()
    with open("document.pdf", "wb") as f:
        f.write(content)
def preprocess(text):
    """Collapse newlines and runs of whitespace in *text* into single spaces."""
    text = text.replace("\n", " ")
    # Raw string: "\s" is an invalid escape sequence and warns on modern Python.
    text = re.sub(r"\s+", " ", text)
    return text
def pdf_to_text(path, start_page=1, end_page=None):
    """Extract preprocessed text from a page range of a PDF.

    Args:
        path: filesystem path to the PDF file.
        start_page: 1-based first page to extract (default: first page).
        end_page: 1-based last page, inclusive; None means the last page.

    Returns:
        list[str]: one whitespace-normalized text string per page.
    """
    # Context manager guarantees the document is closed even if a page
    # fails to parse (the original leaked the handle on exceptions).
    with fitz.open(path) as doc:
        if end_page is None:
            end_page = doc.page_count
        text_list = [
            preprocess(doc.load_page(i).get_text("text"))
            for i in range(start_page - 1, end_page)
        ]
    return text_list
def text_to_chunks(texts, word_length=150, start_page=1):
    """Split per-page texts into ~word_length-word chunks tagged "[page] "..."".

    A short trailing fragment of a page (fewer than *word_length* words) is
    carried forward and prepended to the next page's tokens, so only the very
    last chunk of the document may be undersized.
    """
    tokens_per_page = [page.split(" ") for page in texts]
    chunks = []
    page_idx = 0
    while page_idx < len(tokens_per_page):
        words = tokens_per_page[page_idx]
        pos = 0
        while pos < len(words):
            piece = words[pos : pos + word_length]
            tail_is_short = (pos + word_length) > len(words) and len(piece) < word_length
            has_next_page = len(tokens_per_page) != page_idx + 1
            if tail_is_short and has_next_page:
                # Defer the fragment to the following page instead of
                # emitting an undersized chunk here.
                tokens_per_page[page_idx + 1] = piece + tokens_per_page[page_idx + 1]
            else:
                body = " ".join(piece).strip()
                chunks.append(f'[{page_idx + start_page}] "{body}"')
            pos += word_length
        page_idx += 1
    return chunks
class SemanticSearch:
    """Nearest-neighbour semantic search over text chunks.

    Embeds chunks with the Universal Sentence Encoder (TF Hub) and serves
    top-k retrieval via scikit-learn's NearestNeighbors.
    """

    def __init__(self):
        # Loads USE v4 from TF Hub (downloads the model on first run).
        self.use = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
        self.fitted = False

    def fit(self, data, batch=1000, n_neighbors=5):
        """Embed *data* (list of chunk strings) and build the k-NN index.

        n_neighbors is clamped to the corpus size so tiny corpora still work.
        """
        self.data = data
        self.embeddings = self.get_text_embedding(data, batch=batch)
        n_neighbors = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        """Return the stored chunks (or their indices) nearest to *text*.

        Raises:
            RuntimeError: if called before fit(). (Bug fix: self.fitted was
            set but never checked, so querying an unfitted index raised an
            opaque AttributeError instead.)
        """
        if not self.fitted:
            raise RuntimeError("SemanticSearch must be fit() before querying.")
        inp_emb = self.use([text])
        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
        if return_data:
            return [self.data[i] for i in neighbors]
        return neighbors

    def get_text_embedding(self, texts, batch=1000):
        """Embed *texts* in batches of *batch*; returns a 2-D float array
        with one row per input text."""
        embeddings = []
        for i in range(0, len(texts), batch):
            embeddings.append(self.use(texts[i : i + batch]))
        return np.vstack(embeddings)
def load_recommender(path, start_page=1):
    """Extract text from the PDF at *path*, chunk it, and (re)fit the
    module-level `recommender` index. Returns a status string for the UI."""
    global recommender
    page_texts = pdf_to_text(path, start_page=start_page)
    recommender.fit(text_to_chunks(page_texts, start_page=start_page))
    return "Corpus Loaded."
def generate_text(openAI_key, prompt, question, engine="gpt-3.5-turbo"):
    """Ask the chat model *question*, grounded in *prompt* (resume chunks).

    Args:
        openAI_key: OpenAI API key (set globally on the openai module).
        prompt: "Information:" block of retrieved resume chunks.
        question: the user's question.
        engine: chat model name. Bug fix: this argument was previously
            ignored and the model was hard-coded; the default is unchanged,
            so existing callers behave identically.

    Returns:
        str: the assistant's reply text.
    """
    openai.api_key = openAI_key
    completions = openai.ChatCompletion.create(
        model=engine,  # was hard-coded to "gpt-3.5-turbo"
        messages=[
            {
                "role": "system",
                "content": "You are a helpful AI assisstant trained to answer questions about a person .",
            },
            {
                "role": "user",
                "content": "Below is the information about a person, you can find their name in there as well. use only that to answer questions asked about them. ",
            },
            {"role": "user", "content": prompt},
            {
                "role": "user",
                "content": "Your answers should be complete and detailed but not too long. Don't answer with any information that's not included in the above inofrmation that means, don't use infromation that's in your dataset. If asked any questions about hiring, answer with yes and provide related skills and exprience to convince they should be hired for the position they're being hired for. If a question is unrelated or you can't find any information about it in provided text above, answer with 'Unrelated question'. Do not respond with false information that's not from the information given above. Ignore outlier information which has nothing to do with the question. Don't mention the above text as the source of your information, just answer the question as you just know the above information. Refer to the person mentioned above using their actual name. Sound helpful and friendly",
            },
            {"role": "user", "content": question},
        ],
        max_tokens=250,
        temperature=0.7,
    )
    message = completions["choices"][0]["message"]["content"]
    return message
def generate_answer(question, openAI_key):
    """Retrieve the most relevant resume chunks and answer *question* with them."""
    relevant_chunks = recommender(question)
    prompt = "Information:\n\n" + "".join(chunk + "\n\n" for chunk in relevant_chunks)
    return generate_text(openAI_key, prompt, question, "gpt-3.5-turbo")
def question_answer(url, file, question, openAI_key):
    """Gradio handler: validate inputs, load the resume PDF, answer the question.

    Args:
        url: URL of a PDF, or empty string.
        file: uploaded file object (has a .name path), or None.
        question: the user's question.
        openAI_key: OpenAI API key string.

    Returns:
        str: the model's answer, or an "[ERROR]: ..." message for bad input.
    """
    if openAI_key.strip() == "":
        return "[ERROR]: Please enter your OpenAI key. Get your key here : https://platform.openai.com/account/api-keys"
    if url.strip() == "" and file is None:
        return "[ERROR]: Both URL and PDF are empty. Provide at least one."
    if url.strip() != "" and file is not None:
        return "[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF)."
    # Check the question BEFORE the expensive download/parse — previously the
    # PDF was fetched and indexed even when the question field was empty.
    if question.strip() == "":
        return "[ERROR]: Question field is empty"
    if url.strip() != "":
        asyncio.run(download_pdf(url))
        load_recommender("document.pdf")
    else:
        # Strips 8 characters before the 4-char extension — presumably
        # Gradio's temp-file name noise; TODO confirm against the Gradio
        # version pinned for this Space.
        old_file_name = file.name
        file_name = old_file_name[:-12] + old_file_name[-4:]
        os.rename(old_file_name, file_name)
        load_recommender(file_name)
    return generate_answer(question, openAI_key)
# Module-level singleton: loading the USE model is expensive, so the search
# index is created once at import time and re-fit per uploaded document.
recommender = SemanticSearch()
# UI copy shown at the top of the Gradio page.
title = "Resume GPT"
description = """ This is an alternative use case of feeding PDFs to chatGPT. I repurposed https://huggingface.co/spaces/bhaskartripathi/pdfChatter to be used as chatbot answering questions about me using my resume. I also changed the model from text-davinci-003 to gpt-3.5-turbo which is cheaper and has better results."""
# Gradio UI: two columns — inputs (key, URL or file upload, question, submit)
# on the left, the answer textbox on the right.
with gr.Blocks() as demo:
    gr.Markdown(f"<center><h1>{title}</h1></center>")
    gr.Markdown(description)
    with gr.Row():
        with gr.Group():
            gr.Markdown(
                f'<p style="text-align:center">Get your Open AI API key <a href="https://platform.openai.com/account/api-keys">here</a></p>'
            )
            openAI_key = gr.Textbox(label="Enter your OpenAI API key here")
            # Exactly one of `url` / `file` should be provided; enforced in
            # question_answer, not by the UI.
            url = gr.Textbox(label="Enter PDF URL here")
            gr.Markdown("<center><h4>OR<h4></center>")
            file = gr.File(
                label="Upload your PDF/ Research Paper / Book here", file_types=[".pdf"]
            )
            question = gr.Textbox(label="Enter your question here")
            btn = gr.Button(value="Submit")
            # NOTE(review): Button.style() was removed in Gradio 4.x — confirm
            # the Gradio version pinned for this Space still supports it.
            btn.style(full_width=True)
        with gr.Group():
            answer = gr.Textbox(label="The answer to your question is :")
        # Wire the submit button to the handler defined above.
        btn.click(
            question_answer, inputs=[url, file, question, openAI_key], outputs=[answer]
        )
demo.launch()