# NOTE(review): the lines below were page-scrape residue (Space status, file
# size, commit hashes, rendered line-number gutter) that broke the Python file;
# preserved here as comments.
# Spaces: Sleeping / Sleeping
# File size: 2,454 Bytes
# cbcd5d2 de1b9f0 db208f0 06f1955
from tools.base_tool import BaseTool
from docling.document_converter import DocumentConverter
from docling.chunking import HierarchicalChunker
from sentence_transformers import SentenceTransformer, util
import torch
class ContentRetrieverTool(BaseTool):
    """Tool that extracts query-relevant chunks from a document.

    The document (PDF, DOCX, XLSX, HTML, ... — anything docling converts) is
    split into hierarchical chunks; chunks are ranked against the query with
    sentence-transformer embeddings, and the top chunks (by cumulative softmax
    probability mass up to ``threshold``) are returned as contextualized text.
    """

    name = 'retrieve_content'
    description = "Extracts relevant content from a file or URL (PDF, DOCX, XLSX, HTML, etc.) based on a given query."
    inputs = {
        "url": {
            "type": "string",
            "description": "The document URL or local path to load content from.",
        },
        "query": {
            "type": "string",
            "description": "Query term(s) used to filter relevant content from the document.",
        },
    }
    output_type = "string"

    def __init__(self, model_name: str = 'all-MiniLM-L6-v2', threshold: float = 0.2):
        """Initialize converter, chunker and embedding model.

        Args:
            model_name: sentence-transformers model used to embed queries and chunks.
            threshold: cumulative softmax-probability mass per query; chunks are
                selected in descending-score order until this mass is reached.
        """
        self.threshold = threshold
        self._converter = DocumentConverter()
        self._chunker = HierarchicalChunker()
        self._embedder = SentenceTransformer(model_name)
        super().__init__()

    def forward(self, url: str, query: str) -> str:
        """Return the chunks of the document at ``url`` most relevant to ``query``.

        Args:
            url: document URL or local path accepted by docling's DocumentConverter.
            query: comma-separated query term(s); each term is ranked independently.

        Returns:
            Selected contextualized chunks joined by blank lines, or a
            human-readable message when the document is empty or nothing matches.
        """
        doc = self._converter.convert(url).document
        chunks = list(self._chunker.chunk(dl_doc=doc))
        if not chunks:
            return "No content found."

        # Hoisted out of the encode() call: an empty/blank query would otherwise
        # produce an empty embedding batch and crash in pytorch_cos_sim.
        queries = [q.strip() for q in query.split(",") if q.strip()]
        if not queries:
            return "No relevant chunks found."

        texts = [chunk.text for chunk in chunks]
        contextual_chunks = [self._chunker.contextualize(c) for c in chunks]
        # Context-only strings (the contextualized text minus the raw chunk text,
        # i.e. heading/section info) so that headings alone can also match.
        context_texts = [ctx.replace(txt, "").strip() for txt, ctx in zip(texts, contextual_chunks)]

        query_embedding = self._embedder.encode(queries, convert_to_tensor=True)

        matches = set()
        # Rank against both the chunk bodies and their context strings; either
        # matching marks the chunk as relevant.
        for corpus in (texts, context_texts):
            embeddings = self._embedder.encode(corpus, convert_to_tensor=True)
            for score in util.pytorch_cos_sim(query_embedding, embeddings):
                probs = torch.nn.functional.softmax(score, dim=0)
                sorted_idxs = torch.argsort(probs, descending=True)
                # Greedily take top-scoring chunks until their cumulative softmax
                # probability mass reaches the threshold for this query term.
                cum_prob = 0.0
                for idx in sorted_idxs:
                    cum_prob += probs[idx].item()
                    matches.add(idx.item())
                    if cum_prob >= self.threshold:
                        break

        if not matches:
            return "No relevant chunks found."
        # Preserve document order of the selected chunks.
        selected_chunks = [contextual_chunks[i] for i in sorted(matches)]
        return "\n\n".join(selected_chunks)