Update app.py
app.py CHANGED
@@ -6,10 +6,10 @@ import torch
 model_name = "cross-encoder/ms-marco-MiniLM-L-12-v2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSequenceClassification.from_pretrained(model_name)
-model.eval()
+model.eval()
 
 # Function to compute relevance score and dynamically adjust threshold
-def get_relevance_score_and_excerpt(query, paragraph):
+def get_relevance_score_and_excerpt(query, paragraph, threshold_weight):
     if not query.strip() or not paragraph.strip():
         return "Please provide both a query and a document paragraph.", ""
 
@@ -17,32 +17,29 @@ def get_relevance_score_and_excerpt(query, paragraph):
     inputs = tokenizer(query, paragraph, return_tensors="pt", truncation=True, padding=True)
 
     with torch.no_grad():
-        output = model(**inputs, output_attentions=True)
+        output = model(**inputs, output_attentions=True)
 
     # Extract logits and calculate base relevance score
     logit = output.logits.squeeze().item()
     base_relevance_score = torch.sigmoid(torch.tensor(logit)).item()
 
-    # Dynamically adjust the attention threshold based on relevance score
-    dynamic_threshold = max(0.02,
+    # Dynamically adjust the attention threshold based on user weight (no relevance score influence)
+    dynamic_threshold = max(0.02, threshold_weight)
 
     # Extract attention scores (last layer)
-    attention = output.attentions[-1]
-    attention_scores = attention.mean(dim=1).mean(dim=0)
+    attention = output.attentions[-1]
+    attention_scores = attention.mean(dim=1).mean(dim=0)
 
-    # Tokenize query and paragraph separately
     query_tokens = tokenizer.tokenize(query)
     paragraph_tokens = tokenizer.tokenize(paragraph)
 
     query_len = len(query_tokens) + 2  # +2 for special tokens [CLS] and first [SEP]
     para_start_idx = query_len
-    para_end_idx = len(inputs["input_ids"][0]) - 1
+    para_end_idx = len(inputs["input_ids"][0]) - 1
 
-    # Handle potential indexing issues
     if para_end_idx <= para_start_idx:
         return round(base_relevance_score, 4), "No relevant tokens extracted."
 
-    # Extract paragraph attention scores and apply dynamic threshold
     para_attention_scores = attention_scores[para_start_idx:para_end_idx, para_start_idx:para_end_idx].mean(dim=0)
 
     if para_attention_scores.numel() == 0:
@@ -51,39 +48,32 @@ def get_relevance_score_and_excerpt(query, paragraph):
     # Get indices of relevant tokens above dynamic threshold
     relevant_indices = (para_attention_scores > dynamic_threshold).nonzero(as_tuple=True)[0].tolist()
 
-    #
-    if relevant_indices:
-        relevant_attention_values = para_attention_scores[relevant_indices]
-        attention_weighted_score = relevant_attention_values.mean().item() * base_relevance_score
-    else:
-        attention_weighted_score = base_relevance_score  # No relevant tokens found
-
-    # Reconstruct paragraph with bolded relevant tokens
+    # Reconstruct paragraph with bolded relevant tokens using HTML tags
     highlighted_text = ""
     for idx, token in enumerate(paragraph_tokens):
         if idx in relevant_indices:
-            highlighted_text += f"
+            highlighted_text += f"<b>{token}</b> "
         else:
             highlighted_text += f"{token} "
 
-    # Convert tokens back to readable format
     highlighted_text = tokenizer.convert_tokens_to_string(highlighted_text.split())
 
-    return round(
+    return round(base_relevance_score, 4), highlighted_text
 
-# Define Gradio interface
+# Define Gradio interface with a slider for threshold adjustment
 interface = gr.Interface(
     fn=get_relevance_score_and_excerpt,
     inputs=[
         gr.Textbox(label="Query", placeholder="Enter your search query..."),
-        gr.Textbox(label="Document Paragraph", placeholder="Enter a paragraph to match...")
+        gr.Textbox(label="Document Paragraph", placeholder="Enter a paragraph to match..."),
+        gr.Slider(minimum=0.02, maximum=0.5, value=0.1, step=0.01, label="Attention Threshold")
     ],
     outputs=[
-        gr.Textbox(label="
+        gr.Textbox(label="Relevance Score"),
         gr.HTML(label="Highlighted Document Paragraph")
     ],
-    title="Cross-Encoder
-    description="
+    title="Cross-Encoder Attention Highlighting",
+    description="Adjust the attention threshold to control token highlighting sensitivity.",
     allow_flagging="never",
     live=True
 )
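For quick reference, the scoring path above can be exercised outside the Space with a minimal standalone sketch. The query and paragraph strings below are invented examples; everything else mirrors the transformers calls visible in the diff:

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "cross-encoder/ms-marco-MiniLM-L-12-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout for deterministic inference

query = "how do cross-encoders rank passages?"
paragraph = "A cross-encoder scores a query and a passage jointly in one forward pass."

# The pair is encoded as one sequence: [CLS] query [SEP] paragraph [SEP]
inputs = tokenizer(query, paragraph, return_tensors="pt", truncation=True, padding=True)
with torch.no_grad():
    output = model(**inputs, output_attentions=True)

# This checkpoint has a single-logit head; sigmoid squashes it to (0, 1)
score = torch.sigmoid(output.logits.squeeze()).item()
print(round(score, 4))

The MS MARCO cross-encoders are usually compared by raw logit when ranking many passages; the sigmoid here just gives a human-readable score for a single pair.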
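The two chained mean calls reduce the last attention layer down to a token-by-token matrix. Continuing from the sketch above, the shapes work out as follows (standard transformers convention, batch size 1 here):

# output.attentions is a tuple with one tensor per layer, each shaped
# (batch, num_heads, seq_len, seq_len)
attention = attention_last = output.attentions[-1]   # last layer
avg_heads = attention_last.mean(dim=1)               # (batch, seq_len, seq_len): average over heads
attention_scores = avg_heads.mean(dim=0)             # (seq_len, seq_len): the batch of 1 collapses away
# attention_scores[i, j] is roughly "how much token i attends to token j",
# averaged over every head of the final layer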
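The index arithmetic relies on how BERT-style tokenizers lay out a sentence pair; again continuing the same sketch:

# Pairs are encoded as: [CLS] q1 ... qn [SEP] p1 ... pm [SEP]
query_tokens = tokenizer.tokenize(query)
paragraph_tokens = tokenizer.tokenize(paragraph)

para_start_idx = len(query_tokens) + 2           # skip [CLS] and the first [SEP]
para_end_idx = len(inputs["input_ids"][0]) - 1   # stop before the final [SEP]

# When nothing was truncated, the slice covers exactly the paragraph tokens
assert para_end_idx - para_start_idx == len(paragraph_tokens)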
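The thresholding and markup steps can be sketched the same way, with 0.1 standing in for the slider value (it is the slider's default in the new interface); all names carry over from the sketches above:

threshold_weight = 0.1                            # would come from the Gradio slider
dynamic_threshold = max(0.02, threshold_weight)

# Mean attention each paragraph token receives from the other paragraph tokens
para_attention_scores = attention_scores[
    para_start_idx:para_end_idx, para_start_idx:para_end_idx
].mean(dim=0)

relevant_indices = (para_attention_scores > dynamic_threshold).nonzero(as_tuple=True)[0].tolist()

# Wrap above-threshold tokens in <b> tags, then let the tokenizer stitch
# WordPiece pieces (e.g. "##ing") back into readable words
pieces = [
    f"<b>{tok}</b>" if idx in relevant_indices else tok
    for idx, tok in enumerate(paragraph_tokens)
]
print(tokenizer.convert_tokens_to_string(pieces))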
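The diff stops at the gr.Interface(...) call; a Space's app.py conventionally finishes by launching the interface, along these lines (not shown in the diff):

if __name__ == "__main__":
    interface.launch()

Because live=True, Gradio re-runs get_relevance_score_and_excerpt on every input change, so dragging the Attention Threshold slider re-renders the highlighting immediately.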