"""
Reranker Agent

Cross-encoder style reranking for improved retrieval precision,
following reranking patterns common in production RAG systems.

Key Features:
- LLM-based relevance scoring (cross-encoder style), with a heuristic fallback
- Relevance explanations alongside scores
- Diversity promotion (MMR-style) to avoid redundant results
- Quality filtering (drops chunks below a relevance threshold)
- Near-duplicate chunk removal
"""

from typing import List, Optional, Dict, Any, Tuple
from pydantic import BaseModel, Field
from loguru import logger
import json
import re
from difflib import SequenceMatcher

try:
    import httpx
    HTTPX_AVAILABLE = True
except ImportError:
    HTTPX_AVAILABLE = False

from .retriever import RetrievalResult


class RerankerConfig(BaseModel):
    """Configuration for reranking."""
    # LLM settings
    model: str = Field(default="llama3.2:3b")
    base_url: str = Field(default="http://localhost:11434")
    temperature: float = Field(default=0.1)

    # Reranking settings
    top_k: int = Field(default=5, ge=1)
    min_relevance_score: float = Field(default=0.3, ge=0.0, le=1.0)

    # Diversity settings
    enable_diversity: bool = Field(default=True)
    diversity_threshold: float = Field(default=0.8, description="Max similarity between chunks")

    # Deduplication
    dedup_threshold: float = Field(default=0.9, description="Similarity threshold for dedup")

    # Use LLM for reranking (vs heuristic)
    use_llm_rerank: bool = Field(default=True)

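# Illustrative configuration override (example values, not tuned
# recommendations): disabling LLM scoring keeps reranking fast and
# removes the dependency on a running Ollama server.
#
#   fast_config = RerankerConfig(use_llm_rerank=False, top_k=3)
#   reranker = RerankerAgent(fast_config)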

class RankedResult(BaseModel):
    """A reranked result with relevance score."""
    chunk_id: str
    document_id: str
    text: str
    original_score: float
    relevance_score: float  # Cross-encoder score
    final_score: float  # Combined score
    relevance_explanation: Optional[str] = None

    # From original result
    page: Optional[int] = None
    chunk_type: Optional[str] = None
    source_path: Optional[str] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
    bbox: Optional[Dict[str, float]] = None


class RerankerAgent:
    """
    Reranks retrieval results for improved precision.

    Capabilities:
    1. Cross-encoder style relevance scoring (LLM or heuristic)
    2. Diversity-aware reranking (MMR-style)
    3. Quality filtering
    4. Chunk deduplication
    """

    RERANK_PROMPT = """Score the relevance of this text passage to the given query.

Query: {query}

Passage: {passage}

Score the relevance on a scale of 0-10 where:
- 0-2: Completely irrelevant, no useful information
- 3-4: Marginally relevant, tangentially related
- 5-6: Somewhat relevant, contains some useful information
- 7-8: Highly relevant, directly addresses the query
- 9-10: Perfectly relevant, comprehensive answer to query

Respond with ONLY a JSON object:
{{"score": <number>, "explanation": "<brief reason>"}}"""

    def __init__(self, config: Optional[RerankerConfig] = None):
        """
        Initialize Reranker Agent.

        Args:
            config: Reranker configuration
        """
        self.config = config or RerankerConfig()
        logger.info(f"RerankerAgent initialized (model={self.config.model})")

    def rerank(
        self,
        query: str,
        results: List[RetrievalResult],
        top_k: Optional[int] = None,
    ) -> List[RankedResult]:
        """
        Rerank retrieval results by relevance to query.

        Args:
            query: Original search query
            results: Retrieval results to rerank
            top_k: Number of results to return

        Returns:
            Reranked results with relevance scores
        """
        if not results:
            return []

        top_k = top_k or self.config.top_k

        # Step 1: Deduplicate
        deduped = self._deduplicate(results)

        # Step 2: Score relevance
        if self.config.use_llm_rerank and HTTPX_AVAILABLE:
            scored = self._llm_rerank(query, deduped)
        else:
            scored = self._heuristic_rerank(query, deduped)

        # Step 3: Filter low-quality
        filtered = [
            r for r in scored
            if r.relevance_score >= self.config.min_relevance_score
        ]

        # Step 4: Diversity promotion (MMR-style)
        if self.config.enable_diversity:
            diverse = self._promote_diversity(filtered, top_k)
        else:
            diverse = sorted(filtered, key=lambda x: x.final_score, reverse=True)[:top_k]

        return diverse

    def _deduplicate(self, results: List[RetrievalResult]) -> List[RetrievalResult]:
        """Remove near-duplicate chunks."""
        if not results:
            return []

        deduped = [results[0]]

        for result in results[1:]:
            is_dup = False
            for existing in deduped:
                similarity = self._text_similarity(result.text, existing.text)
                if similarity > self.config.dedup_threshold:
                    is_dup = True
                    break

            if not is_dup:
                deduped.append(result)

        if len(results) != len(deduped):
            logger.debug(f"Deduplication: {len(results)} -> {len(deduped)} chunks")

        return deduped

    def _text_similarity(self, text1: str, text2: str) -> float:
        """Compute text similarity using SequenceMatcher."""
        return SequenceMatcher(None, text1.lower(), text2.lower()).ratio()

    def _llm_rerank(
        self,
        query: str,
        results: List[RetrievalResult],
    ) -> List[RankedResult]:
        """Use LLM for cross-encoder style reranking."""
        ranked = []

        for result in results:
            try:
                relevance_score, explanation = self._score_passage(query, result.text)

                # Combine original score with relevance score
                # Weight relevance more heavily
                final_score = 0.3 * result.score + 0.7 * (relevance_score / 10.0)

                ranked.append(RankedResult(
                    chunk_id=result.chunk_id,
                    document_id=result.document_id,
                    text=result.text,
                    original_score=result.score,
                    relevance_score=relevance_score / 10.0,  # Normalize to 0-1
                    final_score=final_score,
                    relevance_explanation=explanation,
                    page=result.page,
                    chunk_type=result.chunk_type,
                    source_path=result.source_path,
                    metadata=result.metadata,
                    bbox=result.bbox,
                ))

            except Exception as e:
                logger.warning(f"Failed to score passage: {e}")
                # Fall back to original score
                ranked.append(RankedResult(
                    chunk_id=result.chunk_id,
                    document_id=result.document_id,
                    text=result.text,
                    original_score=result.score,
                    relevance_score=result.score,
                    final_score=result.score,
                    page=result.page,
                    chunk_type=result.chunk_type,
                    source_path=result.source_path,
                    metadata=result.metadata,
                    bbox=result.bbox,
                ))

        return ranked

    def _score_passage(self, query: str, passage: str) -> Tuple[float, str]:
        """Score a single passage using LLM."""
        prompt = self.RERANK_PROMPT.format(
            query=query,
            passage=passage[:1000],  # Truncate long passages
        )

        with httpx.Client(timeout=30.0) as client:
            response = client.post(
                f"{self.config.base_url}/api/generate",
                json={
                    "model": self.config.model,
                    "prompt": prompt,
                    "stream": False,
                    "options": {
                        "temperature": self.config.temperature,
                        "num_predict": 256,
                    },
                },
            )
            response.raise_for_status()
            result = response.json()

        # Parse response
        response_text = result.get("response", "")
        return self._parse_score_response(response_text)

    def _parse_score_response(self, text: str) -> Tuple[float, str]:
        """Parse score and explanation from LLM response."""
        try:
            # Find JSON in response
            json_match = re.search(r'\{[\s\S]*\}', text)
            if json_match:
                data = json.loads(json_match.group())
                score = float(data.get("score", 5))
                explanation = data.get("explanation", "")
                return min(max(score, 0), 10), explanation
        except Exception:
            pass

        # Try to find just a number
        num_match = re.search(r'\b([0-9]|10)\b', text)
        if num_match:
            return float(num_match.group()), ""

        # Default
        return 5.0, "Could not parse score"

    def _heuristic_rerank(
        self,
        query: str,
        results: List[RetrievalResult],
    ) -> List[RankedResult]:
        """Fast heuristic-based reranking."""
        query_terms = set(query.lower().split())
        ranked = []

        for result in results:
            # Compute heuristic relevance
            text_lower = result.text.lower()

            # Term overlap
            text_terms = set(text_lower.split())
            overlap = len(query_terms & text_terms) / len(query_terms) if query_terms else 0

            # Exact-phrase match indicator (weighted below)
            phrase_bonus = 1.0 if query.lower() in text_lower else 0.0

            # Length score: rewards chunks up to 500 chars so very short
            # fragments rank lower; saturates at 1.0 beyond the cap
            length = len(result.text)
            length_score = min(length, 500) / 500

            # Combine scores
            relevance = 0.5 * overlap + 0.3 * phrase_bonus + 0.2 * length_score
            final_score = 0.4 * result.score + 0.6 * relevance

            ranked.append(RankedResult(
                chunk_id=result.chunk_id,
                document_id=result.document_id,
                text=result.text,
                original_score=result.score,
                relevance_score=relevance,
                final_score=final_score,
                page=result.page,
                chunk_type=result.chunk_type,
                source_path=result.source_path,
                metadata=result.metadata,
                bbox=result.bbox,
            ))

        return ranked

    def _promote_diversity(
        self,
        results: List[RankedResult],
        top_k: int,
    ) -> List[RankedResult]:
        """
        Promote diversity using MMR-style selection.

        Maximal Marginal Relevance balances relevance with diversity.
        """
        if not results:
            return []

        # Sort by final score first
        sorted_results = sorted(results, key=lambda x: x.final_score, reverse=True)

        selected = [sorted_results[0]]
        remaining = sorted_results[1:]

        while len(selected) < top_k and remaining:
            # Find the candidate with the best MMR score
            best_mmr = float("-inf")
            best_idx = 0

            for i, candidate in enumerate(remaining):
                # Relevance component
                relevance = candidate.final_score

                # Diversity component (max similarity to selected)
                max_sim = max(
                    self._text_similarity(candidate.text, s.text)
                    for s in selected
                )

                # MMR = lambda * relevance - (1-lambda) * max_similarity
                # Using lambda = 0.7 (favor relevance)
                mmr = 0.7 * relevance - 0.3 * max_sim

                if mmr > best_mmr:
                    best_mmr = mmr
                    best_idx = i

            selected.append(remaining.pop(best_idx))

        return selected
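

# Minimal usage sketch, illustrative only: it assumes RetrievalResult accepts
# the keyword fields used below (with the rest defaulting), and must be run as
# a module (python -m <package>.reranker) because of the relative import above.
# use_llm_rerank=False keeps it runnable without a local Ollama server.
if __name__ == "__main__":
    demo_results = [
        RetrievalResult(chunk_id=f"chunk-{i}", document_id="doc-1", text=text, score=score)
        for i, (text, score) in enumerate([
            ("The reranker filters and reorders retrieved chunks.", 0.82),
            ("The reranker filters and reorders the retrieved chunks.", 0.80),  # near-duplicate
            ("Unrelated passage about database indexing.", 0.55),
        ])
    ]

    agent = RerankerAgent(RerankerConfig(use_llm_rerank=False, top_k=2))
    for r in agent.rerank("reranker filters and reorders", demo_results):
        print(f"{r.chunk_id}: final={r.final_score:.2f} relevance={r.relevance_score:.2f}")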