# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
from typing import Any, Dict, List, Union
from haystack import component, default_to_dict
from haystack.dataclasses import Document
class RecallMode(Enum):
    """
    Enum for the mode to use for calculating the recall score.
    """

    # Score is based on whether any document is retrieved.
    SINGLE_HIT = "single_hit"
    # Score is based on how many documents were retrieved.
    MULTI_HIT = "multi_hit"

    def __str__(self):
        return self.value

    @staticmethod
    def from_str(string: str) -> "RecallMode":
        """
        Look up the RecallMode member whose value matches `string`.
        """
        for member in RecallMode:
            if member.value == string:
                return member
        supported = [member.value for member in RecallMode]
        msg = f"Unknown recall mode '{string}'. Supported modes are: {supported}"
        raise ValueError(msg)
@component
class DocumentRecallEvaluator:
    """
    Evaluator that calculates the Recall score for a list of documents.

    Returns both a list of scores for each question and the average.
    There can be multiple ground truth documents and multiple predicted documents as input.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.evaluators import DocumentRecallEvaluator

    evaluator = DocumentRecallEvaluator()
    result = evaluator.run(
        ground_truth_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="9th")],
        ],
        retrieved_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
        ],
    )
    print(result["individual_scores"])
    # [1.0, 1.0]
    print(result["score"])
    # 1.0
    ```
    """

    def __init__(self, mode: Union[str, RecallMode] = RecallMode.SINGLE_HIT):
        """
        Create a DocumentRecallEvaluator component.

        :param mode:
            Mode to use for calculating the recall score.
        :raises ValueError:
            If `mode` is a string that does not name a supported `RecallMode`.
        """
        if isinstance(mode, str):
            mode = RecallMode.from_str(mode)
        # Resolve the scoring function once at construction time instead of
        # branching on the mode on every run() call.
        mode_functions = {RecallMode.SINGLE_HIT: self._recall_single_hit, RecallMode.MULTI_HIT: self._recall_multi_hit}
        self.mode_function = mode_functions[mode]
        self.mode = mode

    def _recall_single_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
        """
        Return 1.0 if at least one ground truth document was retrieved, else 0.0.

        Documents are matched on their `content` attribute only; duplicate
        contents are collapsed before the comparison.
        """
        unique_truths = {g.content for g in ground_truth_documents}
        unique_retrievals = {p.content for p in retrieved_documents}
        retrieved_ground_truths = unique_truths.intersection(unique_retrievals)
        return float(len(retrieved_ground_truths) > 0)

    def _recall_multi_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
        """
        Return the fraction of ground truth documents that were retrieved.

        Documents are matched on their `content` attribute only; duplicate
        contents are collapsed before intersecting, while the denominator is the
        raw number of ground truth documents.
        """
        unique_truths = {g.content for g in ground_truth_documents}
        unique_retrievals = {p.content for p in retrieved_documents}
        retrieved_ground_truths = unique_truths.intersection(unique_retrievals)
        return len(retrieved_ground_truths) / len(ground_truth_documents)

    @component.output_types(score=float, individual_scores=List[float])
    def run(
        self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
    ) -> Dict[str, Any]:
        """
        Run the DocumentRecallEvaluator on the given inputs.

        `ground_truth_documents` and `retrieved_documents` must have the same length.

        :param ground_truth_documents:
            A list of expected documents for each question.
        :param retrieved_documents:
            A list of retrieved documents for each question.
        :returns:
            A dictionary with the following outputs:
            - `score` - The average of calculated scores.
            - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents the proportion of matching
            documents retrieved. If the mode is `single_hit`, the individual scores are 0 or 1.
        :raises ValueError:
            If the two input lists have different lengths.
        """
        if len(ground_truth_documents) != len(retrieved_documents):
            msg = "The length of ground_truth_documents and retrieved_documents must be the same."
            raise ValueError(msg)
        scores = []
        for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
            score = self.mode_function(ground_truth, retrieved)
            scores.append(score)
        return {"score": sum(scores) / len(retrieved_documents), "individual_scores": scores}

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(self, mode=str(self.mode))
|