"""Content-retrieval tool: extracts query-relevant chunks from a document URL or local path."""
import torch
from docling.chunking import HierarchicalChunker
from docling.document_converter import DocumentConverter
from sentence_transformers import SentenceTransformer, util

from tools.base_tool import BaseTool
class ContentRetrieverTool(BaseTool):
    """Extract query-relevant content from a document (PDF, DOCX, XLSX, HTML, ...).

    The document is converted with docling, split into hierarchical chunks,
    and each chunk — plus its surrounding context (headings, captions) — is
    scored against the query terms with sentence-transformer embeddings.
    For each query term, the highest-scoring chunks are kept until their
    cumulative softmax-normalized similarity mass reaches ``threshold``.
    """

    name = 'retrieve_content'
    description = "Extracts relevant content from a file or URL (PDF, DOCX, XLSX, HTML, etc.) based on a given query."
    inputs = {
        "url": {
            "type": "string",
            "description": "The document URL or local path to load content from.",
        },
        "query": {
            "type": "string",
            "description": "Query term(s) used to filter relevant content from the document.",
        },
    }
    output_type = "string"

    def __init__(self, model_name: str = 'all-MiniLM-L6-v2', threshold: float = 0.2):
        """Initialize the converter, chunker and embedding model.

        Args:
            model_name: sentence-transformers model used for embeddings.
            threshold: Cumulative softmax-probability mass of similarity
                scores at which chunk selection stops (per query term).
        """
        self.threshold = threshold
        self._converter = DocumentConverter()
        self._chunker = HierarchicalChunker()
        self._embedder = SentenceTransformer(model_name)
        super().__init__()

    def forward(self, url: str, query: str) -> str:
        """Return the chunks of the document at *url* most relevant to *query*.

        Args:
            url: Document URL or local path accepted by docling.
            query: Comma-separated query term(s); blank terms are ignored.

        Returns:
            The selected contextualized chunks, in document order, joined by
            blank lines — or an explanatory message when nothing is found.
        """
        # Guard: an empty/comma-only query would otherwise produce an empty
        # embedding batch and crash inside the cosine-similarity call.
        queries = [q.strip() for q in query.split(",") if q.strip()]
        if not queries:
            return "No relevant chunks found."

        doc = self._converter.convert(url).document
        chunks = list(self._chunker.chunk(dl_doc=doc))
        if not chunks:
            return "No content found."

        texts = [chunk.text for chunk in chunks]
        contextual_chunks = [self._chunker.contextualize(c) for c in chunks]
        # Context-only strings: contextualized text minus the raw chunk body,
        # so surrounding headings/captions can match a query on their own.
        context_texts = [ctx.replace(txt, "").strip() for txt, ctx in zip(texts, contextual_chunks)]

        query_embedding = self._embedder.encode(queries, convert_to_tensor=True)

        matches: set[int] = set()
        for corpus in (texts, context_texts):
            embeddings = self._embedder.encode(corpus, convert_to_tensor=True)
            # One similarity row per query term.
            for scores in util.pytorch_cos_sim(query_embedding, embeddings):
                probs = torch.nn.functional.softmax(scores, dim=0)
                # Greedily take the highest-probability chunks until their
                # cumulative mass reaches the threshold (the crossing chunk
                # is included).
                cum_prob = 0.0
                for idx in torch.argsort(probs, descending=True):
                    cum_prob += probs[idx].item()
                    matches.add(idx.item())
                    if cum_prob >= self.threshold:
                        break

        if not matches:
            return "No relevant chunks found."
        # Sort indices to preserve the chunks' original document order.
        return "\n\n".join(contextual_chunks[i] for i in sorted(matches))