Spaces: Runtime error
Reranker
Browse files
- app.py +12 -4
- backend/semantic_search.py +29 -4
app.py
CHANGED
```diff
@@ -6,12 +6,17 @@ import logging
 from pathlib import Path
 from time import perf_counter
 
+from dotenv import load_dotenv
+print(load_dotenv())
+
+
 import gradio as gr
 from jinja2 import Environment, FileSystemLoader
 
 from backend.query_llm import generate_hf, generate_openai
 from backend.semantic_search import retrieve
 
+# load_dotenv(os.path.join(os.path.dirname(__file__), '..', '.env'))
 
 TOP_K = int(os.getenv("TOP_K", 4))
 
@@ -34,7 +39,7 @@ def add_text(history, text):
     return history, gr.Textbox(value="", interactive=False)
 
 
-def bot(history, api_kind):
+def bot(history, api_kind, rerank):
     query = history[-1][0]
 
     if not query:
@@ -44,7 +49,7 @@ def bot(history, api_kind):
     # Retrieve documents relevant to query
     document_start = perf_counter()
 
-    documents = retrieve(query, TOP_K)
+    documents = retrieve(query, TOP_K, rerank)
 
     document_time = perf_counter() - document_start
     logger.info(f'Finished Retrieving documents in {round(document_time, 2)} seconds...')
@@ -60,6 +65,8 @@ def bot(history, api_kind):
     else:
         raise gr.Error(f"API {api_kind} is not supported")
 
+    print(f"{prompt}")
+
     history[-1][1] = ""
     for character in generate_fn(prompt, history[:-1]):
         history[-1][1] = character
@@ -87,18 +94,19 @@ with gr.Blocks() as demo:
         txt_btn = gr.Button(value="Submit text", scale=1)
 
     api_kind = gr.Radio(choices=["HuggingFace", "OpenAI"], value="HuggingFace")
+    rerank = gr.Checkbox(label="Rerank", value=True)
 
     prompt_html = gr.HTML()
     # Turn off interactivity while generating if you click
     txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
-        bot, [chatbot, api_kind], [chatbot, prompt_html])
+        bot, [chatbot, api_kind, rerank], [chatbot, prompt_html])
 
     # Turn it back on
     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
 
     # Turn off interactivity while generating if you hit enter
     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
-        bot, [chatbot, api_kind], [chatbot, prompt_html])
+        bot, [chatbot, api_kind, rerank], [chatbot, prompt_html])
 
     # Turn it back on
     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
```
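The new `print(load_dotenv())` call reads a `.env` file from the working directory and prints whether one was found, which doubles as a startup sanity check in the Space logs. Below is a minimal sketch (a hypothetical helper, not part of this commit) that checks the environment variables app.py and backend/semantic_search.py read via `os.getenv`; the comments about typical values are assumptions.

```python
# check_env.py -- hypothetical helper, not part of this commit
import os

from dotenv import load_dotenv

# load_dotenv() returns True only if a .env file was actually found and loaded,
# which is what the new print(load_dotenv()) line surfaces at startup.
print("loaded .env:", load_dotenv())

# Variables read via os.getenv in the code shown in this commit.
for name in ("TOP_K", "BATCH_SIZE", "TEXT_COLUMN",
             "EMB_MODEL",        # a sentence-transformers embedding model id (assumption)
             "RERANKER_MODEL"):  # a cross-encoder reranker id; optional, enables reranking
    print(f"{name} = {os.getenv(name)}")
```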
backend/semantic_search.py
CHANGED
```diff
@@ -1,8 +1,10 @@
 import os
+import torch
 
 import gradio as gr
 import lancedb
 from sentence_transformers import SentenceTransformer
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
 
 db = lancedb.connect(".lancedb")
 
@@ -12,15 +14,38 @@ TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))
 
 retriever = SentenceTransformer(os.getenv("EMB_MODEL"))
+reranker_model = os.getenv("RERANKER_MODEL", None)
 
+if reranker_model:
+    reranker = AutoModelForSequenceClassification.from_pretrained(reranker_model)
+    tokenizer = AutoTokenizer.from_pretrained(reranker_model)
+    reranker_pipeline = pipeline("text-classification", model=reranker, tokenizer=tokenizer)
 
-def retrieve(query, k):
+
+def retrieve(query, k, rerank=True):
     query_vec = retriever.encode(query)
     try:
-        documents = TABLE.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(k).to_list()
-        documents = [doc[TEXT_COLUMN] for doc in documents]
+        num_retrieve = k * (5 if rerank else 1)
+        documents = TABLE.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(num_retrieve).to_list()
+        docs = [doc[TEXT_COLUMN] for doc in documents]
+
+        if not rerank:
+            return docs
+
+        assert reranker_model, "Reranker model is not provided"
 
-        return documents
+        reranked_documents = []
+        for i in range(0, len(docs), BATCH_SIZE):
+            batch_texts = docs[i:i+BATCH_SIZE]
+            inputs = tokenizer([query]*len(batch_texts), batch_texts, return_tensors="pt", padding=True, truncation=True)
+            with torch.no_grad():
+                outputs = reranker(**inputs)
+            logits = outputs.logits.squeeze().tolist()
+            reranked_documents.extend(zip(batch_texts, logits))
+
+        reranked_documents.sort(key=lambda x: x[1], reverse=True)
+        return [doc[0] for doc in reranked_documents[:k]]
 
     except Exception as e:
         raise gr.Error(str(e))
+
```
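With reranking on, `retrieve` pulls `k * 5` candidates from LanceDB and keeps the top `k` after scoring each (query, passage) pair with the cross-encoder. The snippet below is a minimal standalone sketch of that scoring loop, assuming `RERANKER_MODEL` points at a sequence-classification reranker such as `cross-encoder/ms-marco-MiniLM-L-6-v2` (an example id, not one fixed by this commit):

```python
# rerank_sketch.py -- standalone illustration of the scoring loop added to retrieve()
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "cross-encoder/ms-marco-MiniLM-L-6-v2"  # example; use the value of RERANKER_MODEL
tokenizer = AutoTokenizer.from_pretrained(model_id)
reranker = AutoModelForSequenceClassification.from_pretrained(model_id)
reranker.eval()

query = "how does the reranker work?"
docs = [
    "A cross-encoder scores a query and a passage jointly and is used to rerank candidates.",
    "LanceDB is an embedded vector database used for the first-stage retrieval.",
    "The weather in Paris is mild in spring.",
]

# Score every (query, passage) pair in one batch, as retrieve() does per BATCH_SIZE chunk.
inputs = tokenizer([query] * len(docs), docs, return_tensors="pt", padding=True, truncation=True)
with torch.no_grad():
    scores = reranker(**inputs).logits.squeeze(-1)  # squeeze(-1) stays 1-D even for a single passage

# Highest score first; retrieve() keeps the top k of these.
for text, score in sorted(zip(docs, scores.tolist()), key=lambda pair: pair[1], reverse=True):
    print(f"{score:+.2f}  {text}")
```

Two observations on the committed version: it builds a `text-classification` pipeline (`reranker_pipeline`) but scores pairs with the raw model instead, and `logits.squeeze().tolist()` returns a bare float rather than a list when a batch contains a single passage, which would break the `zip`; `squeeze(-1)` as in the sketch avoids that edge case.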
|