hc99 committed on
Commit
93b3423
·
verified ·
1 Parent(s): 4ff79c6

Add files using upload-large-folder tool

Browse files
testbed/deepset-ai__haystack/haystack/components/generators/hugging_face_api.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from dataclasses import asdict
6
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Union
7
+
8
+ from haystack import component, default_from_dict, default_to_dict, logging
9
+ from haystack.dataclasses import StreamingChunk
10
+ from haystack.lazy_imports import LazyImport
11
+ from haystack.utils import Secret, deserialize_callable, deserialize_secrets_inplace, serialize_callable
12
+ from haystack.utils.hf import HFGenerationAPIType, HFModelType, check_valid_model
13
+ from haystack.utils.url_validation import is_valid_http_url
14
+
15
+ with LazyImport(message="Run 'pip install \"huggingface_hub>=0.23.0\"'") as huggingface_hub_import:
16
+ from huggingface_hub import (
17
+ InferenceClient,
18
+ TextGenerationOutput,
19
+ TextGenerationOutputToken,
20
+ TextGenerationStreamOutput,
21
+ )
22
+
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
@component
class HuggingFaceAPIGenerator:
    """
    Generates text using Hugging Face APIs.

    Use it with the following Hugging Face APIs:
    - [Free Serverless Inference API](https://huggingface.co/inference-api)
    - [Paid Inference Endpoints](https://huggingface.co/inference-endpoints)
    - [Self-hosted Text Generation Inference](https://github.com/huggingface/text-generation-inference)

    ### Usage examples

    #### With the free serverless inference API

    ```python
    from haystack.components.generators import HuggingFaceAPIGenerator
    from haystack.utils import Secret

    generator = HuggingFaceAPIGenerator(api_type="serverless_inference_api",
                                        api_params={"model": "HuggingFaceH4/zephyr-7b-beta"},
                                        token=Secret.from_token("<your-api-key>"))

    result = generator.run(prompt="What's Natural Language Processing?")
    print(result)
    ```

    #### With paid inference endpoints

    ```python
    from haystack.components.generators import HuggingFaceAPIGenerator
    from haystack.utils import Secret

    generator = HuggingFaceAPIGenerator(api_type="inference_endpoints",
                                        api_params={"url": "<your-inference-endpoint-url>"},
                                        token=Secret.from_token("<your-api-key>"))

    result = generator.run(prompt="What's Natural Language Processing?")
    print(result)
    ```

    #### With self-hosted text generation inference

    ```python
    from haystack.components.generators import HuggingFaceAPIGenerator

    generator = HuggingFaceAPIGenerator(api_type="text_generation_inference",
                                        api_params={"url": "http://localhost:8080"})

    result = generator.run(prompt="What's Natural Language Processing?")
    print(result)
    ```
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        api_type: Union[HFGenerationAPIType, str],
        api_params: Dict[str, str],
        token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
        generation_kwargs: Optional[Dict[str, Any]] = None,
        stop_words: Optional[List[str]] = None,
        streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
    ):
        """
        Initialize the HuggingFaceAPIGenerator instance.

        :param api_type:
            The type of Hugging Face API to use. Available types:
            - `text_generation_inference`: See [TGI](https://github.com/huggingface/text-generation-inference).
            - `inference_endpoints`: See [Inference Endpoints](https://huggingface.co/inference-endpoints).
            - `serverless_inference_api`: See [Serverless Inference API](https://huggingface.co/inference-api).
        :param api_params:
            A dictionary with the following keys:
            - `model`: Hugging Face model ID. Required when `api_type` is `SERVERLESS_INFERENCE_API`.
            - `url`: URL of the inference endpoint. Required when `api_type` is `INFERENCE_ENDPOINTS` or
            `TEXT_GENERATION_INFERENCE`.
        :param token: The Hugging Face token to use as HTTP bearer authorization.
            Check your HF token in your [account settings](https://huggingface.co/settings/tokens).
        :param generation_kwargs:
            A dictionary with keyword arguments to customize text generation. Some examples: `max_new_tokens`,
            `temperature`, `top_k`, `top_p`.
            For details, see [Hugging Face documentation](https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation)
            for more information.
        :param stop_words: An optional list of strings representing the stop words. These are merged into
            `generation_kwargs["stop_sequences"]`.
        :param streaming_callback: An optional callable for handling streaming responses. A callable set here
            becomes the default; it can be overridden per call in `run()`.
        :raises ValueError: If `api_type` is unknown, or the required `model`/`url` key is missing from
            `api_params`, or the provided `url` is not a valid HTTP URL.
        """

        # Fail fast with an informative message if huggingface_hub is not installed.
        huggingface_hub_import.check()

        if isinstance(api_type, str):
            api_type = HFGenerationAPIType.from_str(api_type)

        if api_type == HFGenerationAPIType.SERVERLESS_INFERENCE_API:
            model = api_params.get("model")
            if model is None:
                raise ValueError(
                    "To use the Serverless Inference API, you need to specify the `model` parameter in `api_params`."
                )
            check_valid_model(model, HFModelType.GENERATION, token)
            model_or_url = model
        elif api_type in [HFGenerationAPIType.INFERENCE_ENDPOINTS, HFGenerationAPIType.TEXT_GENERATION_INFERENCE]:
            url = api_params.get("url")
            if url is None:
                msg = (
                    "To use Text Generation Inference or Inference Endpoints, you need to specify the `url` "
                    "parameter in `api_params`."
                )
                raise ValueError(msg)
            if not is_valid_http_url(url):
                raise ValueError(f"Invalid URL: {url}")
            model_or_url = url
        else:
            msg = f"Unknown api_type {api_type}"
            raise ValueError(msg)

        # handle generation kwargs setup:
        # copy so the caller's dict is never mutated, fold stop_words into stop_sequences,
        # and apply a default of 512 max_new_tokens.
        generation_kwargs = generation_kwargs.copy() if generation_kwargs else {}
        generation_kwargs["stop_sequences"] = generation_kwargs.get("stop_sequences", [])
        generation_kwargs["stop_sequences"].extend(stop_words or [])
        generation_kwargs.setdefault("max_new_tokens", 512)

        self.api_type = api_type
        self.api_params = api_params
        self.token = token
        self.generation_kwargs = generation_kwargs
        self.streaming_callback = streaming_callback
        # The client is pointed either at a model ID (serverless) or at an endpoint URL.
        self._client = InferenceClient(model_or_url, token=token.resolve_value() if token else None)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            A dictionary containing the serialized component.
        """
        # Callables cannot be serialized directly; store the callback's import path instead.
        callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None
        return default_to_dict(
            self,
            api_type=str(self.api_type),
            api_params=self.api_params,
            token=self.token.to_dict() if self.token else None,
            generation_kwargs=self.generation_kwargs,
            streaming_callback=callback_name,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "HuggingFaceAPIGenerator":
        """
        Deserialize this component from a dictionary.

        :param data: The dictionary produced by `to_dict`.
        :returns: The deserialized component instance.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["token"])
        init_params = data["init_parameters"]
        serialized_callback_handler = init_params.get("streaming_callback")
        if serialized_callback_handler:
            # Restore the callable from its serialized import path.
            init_params["streaming_callback"] = deserialize_callable(serialized_callback_handler)
        return default_from_dict(cls, data)

    @component.output_types(replies=List[str], meta=List[Dict[str, Any]])
    def run(
        self,
        prompt: str,
        streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
        generation_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Invoke the text generation inference for the given prompt and generation parameters.

        :param prompt:
            A string representing the prompt.
        :param streaming_callback:
            A callback function that is called when a new token is received from the stream.
            Overrides the callback set at initialization time for this call only.
        :param generation_kwargs:
            Additional keyword arguments for text generation. Merged over the kwargs set at
            initialization time.
        :returns:
            A dictionary with the generated replies and metadata. Both are lists of length n.
            - replies: A list of strings representing the generated replies.
            - meta: A list of dictionaries with generation metadata (model, finish reason, usage).
        """
        # update generation kwargs by merging with the default ones (per-call kwargs win)
        generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}

        # check if streaming_callback is passed; fall back to the instance-level one
        streaming_callback = streaming_callback or self.streaming_callback

        # stream=True makes the client return an iterator of chunks instead of a single output
        hf_output = self._client.text_generation(
            prompt, details=True, stream=streaming_callback is not None, **generation_kwargs
        )

        if streaming_callback is not None:
            return self._stream_and_build_response(hf_output, streaming_callback)

        return self._build_non_streaming_response(hf_output)

    def _stream_and_build_response(
        self, hf_output: Iterable["TextGenerationStreamOutput"], streaming_callback: Callable[[StreamingChunk], None]
    ):
        # Consume the stream, forwarding each non-special token to the callback,
        # then assemble the full reply and its metadata from the collected chunks.
        chunks: List[StreamingChunk] = []
        for chunk in hf_output:
            token: TextGenerationOutputToken = chunk.token
            if token.special:
                # Special tokens (e.g. EOS markers) are not part of the reply text.
                continue
            chunk_metadata = {**asdict(token), **(asdict(chunk.details) if chunk.details else {})}
            stream_chunk = StreamingChunk(token.text, chunk_metadata)
            chunks.append(stream_chunk)
            streaming_callback(stream_chunk)
        # The final chunk carries the stream-level details (finish reason, token count).
        metadata = {
            "finish_reason": chunks[-1].meta.get("finish_reason", None),
            "model": self._client.model,
            "usage": {"completion_tokens": chunks[-1].meta.get("generated_tokens", 0)},
        }
        return {"replies": ["".join([chunk.content for chunk in chunks])], "meta": [metadata]}

    def _build_non_streaming_response(self, hf_output: "TextGenerationOutput"):
        # Build the response dict from a single, complete generation output.
        meta = [
            {
                "model": self._client.model,
                "finish_reason": hf_output.details.finish_reason if hf_output.details else None,
                "usage": {"completion_tokens": len(hf_output.details.tokens) if hf_output.details else 0},
            }
        ]
        return {"replies": [hf_output.generated_text], "meta": meta}
testbed/deepset-ai__haystack/haystack/components/generators/utils.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from haystack.dataclasses import StreamingChunk
6
+
7
+
8
def print_streaming_chunk(chunk: "StreamingChunk") -> None:
    """
    Default callback for handling streamed generation output.

    Writes each received chunk's content to stdout immediately (no trailing
    newline), so tokens of the first completion appear as they arrive.
    """
    print(chunk.content, end="", flush=True)
testbed/deepset-ai__haystack/haystack/components/joiners/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
from .answer_joiner import AnswerJoiner
from .branch import BranchJoiner
from .document_joiner import DocumentJoiner
from .string_joiner import StringJoiner

# Public API of the joiners package; kept alphabetical to match the import order above.
__all__ = ["AnswerJoiner", "BranchJoiner", "DocumentJoiner", "StringJoiner"]
testbed/deepset-ai__haystack/haystack/components/preprocessors/document_splitter.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from copy import deepcopy
6
+ from typing import Any, Callable, Dict, List, Literal, Optional, Tuple
7
+
8
+ from more_itertools import windowed
9
+
10
+ from haystack import Document, component, logging
11
+ from haystack.core.serialization import default_from_dict, default_to_dict
12
+ from haystack.utils import deserialize_callable, serialize_callable
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
@component
class DocumentSplitter:
    """
    Splits long documents into smaller chunks.

    This is a common preprocessing step during indexing.
    It helps Embedders create meaningful semantic representations
    and prevents exceeding language model context limits.

    The DocumentSplitter is compatible with the following DocumentStores:
    - [Astra](https://docs.haystack.deepset.ai/docs/astradocumentstore)
    - [Chroma](https://docs.haystack.deepset.ai/docs/chromadocumentstore) limited support, overlapping information is
    not stored
    - [Elasticsearch](https://docs.haystack.deepset.ai/docs/elasticsearch-document-store)
    - [OpenSearch](https://docs.haystack.deepset.ai/docs/opensearch-document-store)
    - [Pgvector](https://docs.haystack.deepset.ai/docs/pgvectordocumentstore)
    - [Pinecone](https://docs.haystack.deepset.ai/docs/pinecone-document-store) limited support, overlapping
    information is not stored
    - [Qdrant](https://docs.haystack.deepset.ai/docs/qdrant-document-store)
    - [Weaviate](https://docs.haystack.deepset.ai/docs/weaviatedocumentstore)

    ### Usage example

    ```python
    from haystack import Document
    from haystack.components.preprocessors import DocumentSplitter

    doc = Document(content="Moonlight shimmered softly, wolves howled nearby, night enveloped everything.")

    splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=0)
    result = splitter.run(documents=[doc])
    ```
    """

    def __init__(  # pylint: disable=too-many-positional-arguments
        self,
        split_by: Literal["function", "page", "passage", "sentence", "word"] = "word",
        split_length: int = 200,
        split_overlap: int = 0,
        split_threshold: int = 0,
        splitting_function: Optional[Callable[[str], List[str]]] = None,
    ):
        """
        Initialize DocumentSplitter.

        :param split_by: The unit for splitting your documents. Choose from `word` for splitting by spaces (" "),
            `sentence` for splitting by periods ("."), `page` for splitting by form feed ("\\f"),
            `passage` for splitting by double line breaks ("\\n\\n"), or `function` to use a custom
            `splitting_function`.
        :param split_length: The maximum number of units in each split.
        :param split_overlap: The number of overlapping units for each split.
        :param split_threshold: The minimum number of units per split. If a split has fewer units
            than the threshold, it's attached to the previous split.
        :param splitting_function: Necessary when `split_by` is set to "function".
            This is a function which must accept a single `str` as input and return a `list` of `str` as output,
            representing the chunks after splitting.
        :raises ValueError: If `split_by` is not a supported option, `split_by="function"` without a
            `splitting_function`, `split_length` is not positive, or `split_overlap` is negative.
        """

        self.split_by = split_by
        if split_by not in ["function", "page", "passage", "sentence", "word"]:
            # BUGFIX: the message previously omitted 'function' even though the check accepts it.
            raise ValueError("split_by must be one of 'function', 'word', 'sentence', 'page' or 'passage'.")
        if split_by == "function" and splitting_function is None:
            raise ValueError("When 'split_by' is set to 'function', a valid 'splitting_function' must be provided.")
        if split_length <= 0:
            raise ValueError("split_length must be greater than 0.")
        self.split_length = split_length
        if split_overlap < 0:
            raise ValueError("split_overlap must be greater than or equal to 0.")
        self.split_overlap = split_overlap
        self.split_threshold = split_threshold
        self.splitting_function = splitting_function

    @component.output_types(documents=List[Document])
    def run(self, documents: List[Document]):
        """
        Split documents into smaller parts.

        Splits documents by the unit expressed in `split_by`, with a length of `split_length`
        and an overlap of `split_overlap`.

        :param documents: The documents to split.

        :returns: A dictionary with the following key:
            - `documents`: List of documents with the split texts. Each document includes:
                - A metadata field `source_id` to track the original document.
                - A metadata field `page_number` to track the original page number.
                - All other metadata copied from the original document.

        :raises TypeError: if the input is not a list of Documents.
        :raises ValueError: if the content of a document is None.
        """

        if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
            raise TypeError("DocumentSplitter expects a List of Documents as input.")

        split_docs = []
        for doc in documents:
            if doc.content is None:
                raise ValueError(
                    f"DocumentSplitter only works with text documents but content for document ID {doc.id} is None."
                )
            if doc.content == "":
                # Empty documents produce no splits; warn and move on instead of failing.
                logger.warning("Document ID {doc_id} has an empty content. Skipping this document.", doc_id=doc.id)
                continue
            units = self._split_into_units(doc.content, self.split_by)
            text_splits, splits_pages, splits_start_idxs = self._concatenate_units(
                units, self.split_length, self.split_overlap, self.split_threshold
            )
            # deepcopy so split documents never share mutable metadata with the original
            metadata = deepcopy(doc.meta)
            metadata["source_id"] = doc.id
            split_docs += self._create_docs_from_splits(
                text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata
            )
        return {"documents": split_docs}

    def _split_into_units(
        self, text: str, split_by: Literal["function", "page", "passage", "sentence", "word"]
    ) -> List[str]:
        """
        Split `text` into units according to `split_by`, keeping the delimiter attached to each unit.

        NOTE(review): the chosen delimiter is stored on `self.split_at` (not a local) — presumably
        for inspection elsewhere; kept as-is to preserve behavior.
        """
        if split_by == "page":
            self.split_at = "\f"
        elif split_by == "passage":
            self.split_at = "\n\n"
        elif split_by == "sentence":
            self.split_at = "."
        elif split_by == "word":
            self.split_at = " "
        elif split_by == "function" and self.splitting_function is not None:
            # Custom splitting delegates entirely to the user-supplied function.
            return self.splitting_function(text)
        else:
            raise NotImplementedError(
                "DocumentSplitter only supports 'function', 'page', 'passage', 'sentence' or 'word' split_by options."
            )
        units = text.split(self.split_at)
        # Add the delimiter back to all units except the last one, so joining units
        # reconstructs the original text exactly.
        for i in range(len(units) - 1):
            units[i] += self.split_at
        return units

    def _concatenate_units(
        self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int
    ) -> Tuple[List[str], List[int], List[int]]:
        """
        Concatenates the elements into parts of split_length units.

        Keeps track of the original page number that each element belongs. If the length of the current units is less
        than the pre-defined `split_threshold`, it does not create a new split. Instead, it concatenates the current
        units with the last split, preventing the creation of excessively small splits.

        :returns: A tuple of (text splits, their starting page numbers, their starting character indices).
        """

        text_splits: List[str] = []
        splits_pages = []
        splits_start_idxs = []
        cur_start_idx = 0
        cur_page = 1
        # Sliding windows of split_length units, advancing by (split_length - split_overlap)
        # so consecutive splits share split_overlap units.
        segments = windowed(elements, n=split_length, step=split_length - split_overlap)

        for seg in segments:
            # The last window may be padded with None fillvalues; drop them.
            current_units = [unit for unit in seg if unit is not None]
            txt = "".join(current_units)

            # check if length of current units is below split_threshold
            if len(current_units) < split_threshold and len(text_splits) > 0:
                # concatenate the last split with the current one
                text_splits[-1] += txt

            # NOTE: This line skips documents that have content=""
            elif len(txt) > 0:
                text_splits.append(txt)
                splits_pages.append(cur_page)
                splits_start_idxs.append(cur_start_idx)

            # Only the non-overlapping prefix advances the start index and page counter.
            processed_units = current_units[: split_length - split_overlap]
            cur_start_idx += len("".join(processed_units))

            if self.split_by == "page":
                num_page_breaks = len(processed_units)
            else:
                num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)

            cur_page += num_page_breaks

        return text_splits, splits_pages, splits_start_idxs

    def _create_docs_from_splits(
        self, text_splits: List[str], splits_pages: List[int], splits_start_idxs: List[int], meta: Dict
    ) -> List[Document]:
        """
        Creates Document objects from splits enriching them with page number and the metadata of the original document.
        """
        documents: List[Document] = []

        for i, (txt, split_idx) in enumerate(zip(text_splits, splits_start_idxs)):
            # Each split gets its own copy of the metadata so edits don't leak between splits.
            meta = deepcopy(meta)
            doc = Document(content=txt, meta=meta)
            doc.meta["page_number"] = splits_pages[i]
            doc.meta["split_id"] = i
            doc.meta["split_idx_start"] = split_idx
            documents.append(doc)

            if self.split_overlap <= 0:
                continue

            doc.meta["_split_overlap"] = []

            if i == 0:
                # The first split has no previous split to overlap with.
                continue

            doc_start_idx = splits_start_idxs[i]
            previous_doc = documents[i - 1]
            previous_doc_start_idx = splits_start_idxs[i - 1]
            self._add_split_overlap_information(doc, doc_start_idx, previous_doc, previous_doc_start_idx)

        return documents

    @staticmethod
    def _add_split_overlap_information(
        current_doc: Document, current_doc_start_idx: int, previous_doc: Document, previous_doc_start_idx: int
    ):
        """
        Adds split overlap information to the current and previous Document's meta.

        :param current_doc: The Document that is being split.
        :param current_doc_start_idx: The starting index of the current Document.
        :param previous_doc: The Document that was split before the current Document.
        :param previous_doc_start_idx: The starting index of the previous Document.
        """
        # Character range within previous_doc.content that is repeated at the start of current_doc.
        overlapping_range = (current_doc_start_idx - previous_doc_start_idx, len(previous_doc.content))  # type: ignore

        if overlapping_range[0] < overlapping_range[1]:
            overlapping_str = previous_doc.content[overlapping_range[0] : overlapping_range[1]]  # type: ignore

            if current_doc.content.startswith(overlapping_str):  # type: ignore
                # add split overlap information to this Document regarding the previous Document
                current_doc.meta["_split_overlap"].append({"doc_id": previous_doc.id, "range": overlapping_range})

                # add split overlap information to previous Document regarding this Document
                overlapping_range = (0, overlapping_range[1] - overlapping_range[0])
                previous_doc.meta["_split_overlap"].append({"doc_id": current_doc.id, "range": overlapping_range})

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns: A dictionary containing the serialized component.
        """
        serialized = default_to_dict(
            self,
            split_by=self.split_by,
            split_length=self.split_length,
            split_overlap=self.split_overlap,
            split_threshold=self.split_threshold,
        )
        if self.splitting_function:
            # Callables are stored as their import path.
            serialized["init_parameters"]["splitting_function"] = serialize_callable(self.splitting_function)
        return serialized

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentSplitter":
        """
        Deserializes the component from a dictionary.

        :param data: The dictionary produced by `to_dict`.
        :returns: The deserialized component instance.
        """
        init_params = data.get("init_parameters", {})

        splitting_function = init_params.get("splitting_function", None)
        if splitting_function:
            init_params["splitting_function"] = deserialize_callable(splitting_function)

        return default_from_dict(cls, data)
testbed/deepset-ai__haystack/haystack/components/rankers/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
from haystack.components.rankers.lost_in_the_middle import LostInTheMiddleRanker
from haystack.components.rankers.meta_field import MetaFieldRanker
from haystack.components.rankers.sentence_transformers_diversity import SentenceTransformersDiversityRanker
from haystack.components.rankers.transformers_similarity import TransformersSimilarityRanker

# Public API of the rankers package (alphabetical).
__all__ = [
    "LostInTheMiddleRanker",
    "MetaFieldRanker",
    "SentenceTransformersDiversityRanker",
    "TransformersSimilarityRanker",
]
testbed/deepset-ai__haystack/haystack/components/rankers/meta_field.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from collections import defaultdict
6
+ from typing import Any, Callable, Dict, List, Literal, Optional
7
+
8
+ from dateutil.parser import parse as date_parse
9
+
10
+ from haystack import Document, component, logging
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
+ @component
16
+ class MetaFieldRanker:
17
+ """
18
+ Ranks Documents based on the value of their specific meta field.
19
+
20
+ The ranking can be performed in descending order or ascending order.
21
+
22
+ Usage example:
23
+
24
+ ```python
25
+ from haystack import Document
26
+ from haystack.components.rankers import MetaFieldRanker
27
+
28
+ ranker = MetaFieldRanker(meta_field="rating")
29
+ docs = [
30
+ Document(content="Paris", meta={"rating": 1.3}),
31
+ Document(content="Berlin", meta={"rating": 0.7}),
32
+ Document(content="Barcelona", meta={"rating": 2.1}),
33
+ ]
34
+
35
+ output = ranker.run(documents=docs)
36
+ docs = output["documents"]
37
+ assert docs[0].content == "Barcelona"
38
+ ```
39
+ """
40
+
41
    def __init__(
        self,
        meta_field: str,
        weight: float = 1.0,
        top_k: Optional[int] = None,
        ranking_mode: Literal["reciprocal_rank_fusion", "linear_score"] = "reciprocal_rank_fusion",
        sort_order: Literal["ascending", "descending"] = "descending",
        missing_meta: Literal["drop", "top", "bottom"] = "bottom",
        meta_value_type: Optional[Literal["float", "int", "date"]] = None,
    ):
        """
        Creates an instance of MetaFieldRanker.

        :param meta_field:
            The name of the meta field to rank by.
        :param weight:
            In range [0,1].
            0 disables ranking by a meta field.
            0.5 ranking from previous component and based on meta field have the same weight.
            1 ranking by a meta field only.
        :param top_k:
            The maximum number of Documents to return per query.
            If not provided, the Ranker returns all documents it receives in the new ranking order.
        :param ranking_mode:
            The mode used to combine the Retriever's and Ranker's scores.
            Possible values are 'reciprocal_rank_fusion' (default) and 'linear_score'.
            Use the 'linear_score' mode only with Retrievers or Rankers that return a score in range [0,1].
        :param sort_order:
            Whether to sort the meta field by ascending or descending order.
            Possible values are `descending` (default) and `ascending`.
        :param missing_meta:
            What to do with documents that are missing the sorting metadata field.
            Possible values are:
            - 'drop' will drop the documents entirely.
            - 'top' will place the documents at the top of the metadata-sorted list
                (regardless of 'ascending' or 'descending').
            - 'bottom' will place the documents at the bottom of metadata-sorted list
                (regardless of 'ascending' or 'descending').
        :param meta_value_type:
            Parse the meta value into the data type specified before sorting.
            This will only work if all meta values stored under `meta_field` in the provided documents are strings.
            For example, if we specified `meta_value_type="date"` then for the meta value `"date": "2015-02-01"`
            we would parse the string into a datetime object and then sort the documents by date.
            The available options are:
            - 'float' will parse the meta values into floats.
            - 'int' will parse the meta values into integers.
            - 'date' will parse the meta values into datetime objects.
            - 'None' (default) will do no parsing.
        :raises ValueError: If any parameter is outside its documented range/choices
            (validation is delegated to `_validate_params`).
        """

        self.meta_field = meta_field
        self.weight = weight
        self.top_k = top_k
        self.ranking_mode = ranking_mode
        self.sort_order = sort_order
        self.missing_meta = missing_meta
        # Validate after assigning the attributes above so validation sees the stored values;
        # meta_value_type is validated before being assigned.
        self._validate_params(
            weight=self.weight,
            top_k=self.top_k,
            ranking_mode=self.ranking_mode,
            sort_order=self.sort_order,
            missing_meta=self.missing_meta,
            meta_value_type=meta_value_type,
        )
        self.meta_value_type = meta_value_type
106
+
107
    def _validate_params(
        self,
        weight: float,
        top_k: Optional[int],
        ranking_mode: Literal["reciprocal_rank_fusion", "linear_score"],
        sort_order: Literal["ascending", "descending"],
        missing_meta: Literal["drop", "top", "bottom"],
        meta_value_type: Optional[Literal["float", "int", "date"]],
    ):
        """
        Validate the ranker's configuration parameters.

        :param weight: Must be in range [0, 1].
        :param top_k: Must be a positive integer or None.
        :param ranking_mode: Must be 'reciprocal_rank_fusion' or 'linear_score'.
        :param sort_order: Must be 'ascending' or 'descending'.
        :param missing_meta: Must be 'drop', 'top', or 'bottom'.
        :param meta_value_type: Must be 'float', 'int', 'date', or None.
        :raises ValueError: If any parameter is outside its allowed values.
        """
        if top_k is not None and top_k <= 0:
            raise ValueError("top_k must be > 0, but got %s" % top_k)

        if weight < 0 or weight > 1:
            raise ValueError(
                "Parameter <weight> must be in range [0,1] but is currently set to '%s'.\n'0' disables sorting by a "
                "meta field, '0.5' assigns equal weight to the previous relevance scores and the meta field, and "
                "'1' ranks by the meta field only.\nChange the <weight> parameter to a value in range 0 to 1 when "
                "initializing the MetaFieldRanker." % weight
            )

        if ranking_mode not in ["reciprocal_rank_fusion", "linear_score"]:
            raise ValueError(
                "The value of parameter <ranking_mode> must be 'reciprocal_rank_fusion' or 'linear_score', but is "
                "currently set to '%s'.\nChange the <ranking_mode> value to 'reciprocal_rank_fusion' or "
                "'linear_score' when initializing the MetaFieldRanker." % ranking_mode
            )

        if sort_order not in ["ascending", "descending"]:
            raise ValueError(
                "The value of parameter <sort_order> must be 'ascending' or 'descending', "
                "but is currently set to '%s'.\n"
                "Change the <sort_order> value to 'ascending' or 'descending' when initializing the "
                "MetaFieldRanker." % sort_order
            )

        if missing_meta not in ["drop", "top", "bottom"]:
            raise ValueError(
                "The value of parameter <missing_meta> must be 'drop', 'top', or 'bottom', "
                "but is currently set to '%s'.\n"
                "Change the <missing_meta> value to 'drop', 'top', or 'bottom' when initializing the "
                "MetaFieldRanker." % missing_meta
            )

        if meta_value_type not in ["float", "int", "date", None]:
            raise ValueError(
                "The value of parameter <meta_value_type> must be 'float', 'int', 'date' or None but is "
                "currently set to '%s'.\n"
                "Change the <meta_value_type> value to 'float', 'int', 'date' or None when initializing the "
                "MetaFieldRanker." % meta_value_type
            )
157
+
158
@component.output_types(documents=List[Document])
def run(
    self,
    documents: List[Document],
    top_k: Optional[int] = None,
    weight: Optional[float] = None,
    ranking_mode: Optional[Literal["reciprocal_rank_fusion", "linear_score"]] = None,
    sort_order: Optional[Literal["ascending", "descending"]] = None,
    missing_meta: Optional[Literal["drop", "top", "bottom"]] = None,
    meta_value_type: Optional[Literal["float", "int", "date"]] = None,
):
    """
    Ranks a list of Documents based on the selected meta field by:

    1. Sorting the Documents by the meta field in descending or ascending order.
    2. Merging the rankings from the previous component and based on the meta field according to ranking mode and
    weight.
    3. Returning the top-k documents.

    :param documents:
        Documents to be ranked.
    :param top_k:
        The maximum number of Documents to return per query.
        If not provided, the top_k provided at initialization time is used.
    :param weight:
        In range [0,1].
        0 disables ranking by a meta field.
        0.5 ranking from previous component and based on meta field have the same weight.
        1 ranking by a meta field only.
        If not provided, the weight provided at initialization time is used.
    :param ranking_mode:
        (optional) The mode used to combine the Retriever's and Ranker's scores.
        Possible values are 'reciprocal_rank_fusion' (default) and 'linear_score'.
        Use the 'score' mode only with Retrievers or Rankers that return a score in range [0,1].
        If not provided, the ranking_mode provided at initialization time is used.
    :param sort_order:
        Whether to sort the meta field by ascending or descending order.
        Possible values are `descending` (default) and `ascending`.
        If not provided, the sort_order provided at initialization time is used.
    :param missing_meta:
        What to do with documents that are missing the sorting metadata field.
        Possible values are:
        - 'drop' will drop the documents entirely.
        - 'top' will place the documents at the top of the metadata-sorted list
            (regardless of 'ascending' or 'descending').
        - 'bottom' will place the documents at the bottom of metadata-sorted list
            (regardless of 'ascending' or 'descending').
        If not provided, the missing_meta provided at initialization time is used.
    :param meta_value_type:
        Parse the meta value into the data type specified before sorting.
        This will only work if all meta values stored under `meta_field` in the provided documents are strings.
        For example, if we specified `meta_value_type="date"` then for the meta value `"date": "2015-02-01"`
        we would parse the string into a datetime object and then sort the documents by date.
        The available options are:
        -'float' will parse the meta values into floats.
        -'int' will parse the meta values into integers.
        -'date' will parse the meta values into datetime objects.
        -'None' (default) will do no parsing.
    :returns:
        A dictionary with the following keys:
        - `documents`: List of Documents sorted by the specified meta field.

    :raises ValueError:
        If `top_k` is not > 0.
        If `weight` is not in range [0,1].
        If `ranking_mode` is not 'reciprocal_rank_fusion' or 'linear_score'.
        If `sort_order` is not 'ascending' or 'descending'.
        If `meta_value_type` is not 'float', 'int', 'date' or `None`.
    """
    if not documents:
        return {"documents": []}

    # Resolve per-call overrides, falling back to the values set at init time.
    # NOTE(review): `top_k or self.top_k` treats an explicit top_k=0 as "not provided" and silently
    # falls back to self.top_k instead of raising — confirm this is intended.
    top_k = top_k or self.top_k
    weight = weight if weight is not None else self.weight
    ranking_mode = ranking_mode or self.ranking_mode
    sort_order = sort_order or self.sort_order
    missing_meta = missing_meta or self.missing_meta
    meta_value_type = meta_value_type or self.meta_value_type
    self._validate_params(
        weight=weight,
        top_k=top_k,
        ranking_mode=ranking_mode,
        sort_order=sort_order,
        missing_meta=missing_meta,
        meta_value_type=meta_value_type,
    )

    # If the weight is 0 then ranking by meta field is disabled and the original documents should be returned
    if weight == 0:
        return {"documents": documents[:top_k]}

    docs_with_meta_field = [doc for doc in documents if self.meta_field in doc.meta]
    docs_missing_meta_field = [doc for doc in documents if self.meta_field not in doc.meta]

    # If all docs are missing self.meta_field return original documents
    if len(docs_with_meta_field) == 0:
        logger.warning(
            "The parameter <meta_field> is currently set to '{meta_field}', but none of the provided "
            "Documents with IDs {document_ids} have this meta key.\n"
            "Set <meta_field> to the name of a field that is present within the provided Documents.\n"
            "Returning the <top_k> of the original Documents since there are no values to rank.",
            meta_field=self.meta_field,
            document_ids=",".join([doc.id for doc in documents]),
        )
        return {"documents": documents[:top_k]}

    if len(docs_missing_meta_field) > 0:
        warning_start = (
            f"The parameter <meta_field> is currently set to '{self.meta_field}' but the Documents "
            f"with IDs {','.join([doc.id for doc in docs_missing_meta_field])} don't have this meta key.\n"
        )

        # Documents without the meta field are not an error: where they end up is governed by missing_meta.
        if missing_meta == "bottom":
            logger.warning(
                "{warning_start}Because the parameter <missing_meta> is set to 'bottom', these Documents will be "
                "placed at the end of the sorting order.",
                warning_start=warning_start,
            )
        elif missing_meta == "top":
            logger.warning(
                "{warning_start}Because the parameter <missing_meta> is set to 'top', these Documents will be "
                "placed at the top of the sorting order.",
                warning_start=warning_start,
            )
        else:
            logger.warning(
                "{warning_start}Because the parameter <missing_meta> is set to 'drop', these Documents will be "
                "removed from the list of retrieved Documents.",
                warning_start=warning_start,
            )

    # If meta_value_type is provided try to parse the meta values
    parsed_meta = self._parse_meta(docs_with_meta_field=docs_with_meta_field, meta_value_type=meta_value_type)
    tuple_parsed_meta_and_docs = list(zip(parsed_meta, docs_with_meta_field))

    # Sort the documents by self.meta_field
    reverse = sort_order == "descending"
    try:
        tuple_sorted_by_meta = sorted(tuple_parsed_meta_and_docs, key=lambda x: x[0], reverse=reverse)
    except TypeError as error:
        # Return original documents if mixed types that are not comparable are returned (e.g. int and list)
        logger.warning(
            "Tried to sort Documents with IDs {document_ids}, but got TypeError with the message: {error}\n"
            "Returning the <top_k> of the original Documents since meta field ranking is not possible.",
            document_ids=",".join([doc.id for doc in docs_with_meta_field]),
            error=error,
        )
        return {"documents": documents[:top_k]}

    # Merge rankings and handle missing meta fields as specified in the missing_meta parameter
    sorted_by_meta = [doc for meta, doc in tuple_sorted_by_meta]
    if missing_meta == "bottom":
        sorted_documents = sorted_by_meta + docs_missing_meta_field
        sorted_documents = self._merge_rankings(documents, sorted_documents, weight, ranking_mode)
    elif missing_meta == "top":
        sorted_documents = docs_missing_meta_field + sorted_by_meta
        sorted_documents = self._merge_rankings(documents, sorted_documents, weight, ranking_mode)
    else:
        # 'drop': documents without the meta field are excluded from both rankings before merging.
        sorted_documents = sorted_by_meta
        sorted_documents = self._merge_rankings(docs_with_meta_field, sorted_documents, weight, ranking_mode)

    return {"documents": sorted_documents[:top_k]}
320
+
321
def _parse_meta(
    self, docs_with_meta_field: List[Document], meta_value_type: Optional[Literal["float", "int", "date"]]
) -> List[Any]:
    """
    Parse the meta values stored under `self.meta_field` for the Documents provided in `docs_with_meta_field`.

    Parsing is best-effort: if any meta value is not a string, or if parsing any value fails, a warning is
    logged and the raw (unparsed) meta values are returned instead.

    :param docs_with_meta_field: Documents guaranteed by the caller to contain `self.meta_field` in their meta.
    :param meta_value_type: Target type to parse the string meta values into, or `None` to skip parsing.
    :returns: One (possibly parsed) meta value per Document, in the same order as `docs_with_meta_field`.
    """
    if meta_value_type is None:
        return [d.meta[self.meta_field] for d in docs_with_meta_field]

    # Only attempt parsing when every meta value is a string; otherwise fall back to the raw values.
    unique_meta_values = {doc.meta[self.meta_field] for doc in docs_with_meta_field}
    if not all(isinstance(meta_value, str) for meta_value in unique_meta_values):
        # Fix: the log kwarg was previously (confusingly) named `meta_field` although it carried the
        # value of `meta_value_type`; the rendered message is unchanged.
        logger.warning(
            "The parameter <meta_value_type> is currently set to '{meta_value_type}', but not all of meta values "
            "in the provided Documents with IDs {document_ids} are strings.\n"
            "Skipping parsing of the meta values.\n"
            "Set all meta values found under the <meta_field> parameter to strings to use <meta_value_type>.",
            meta_value_type=meta_value_type,
            document_ids=",".join([doc.id for doc in docs_with_meta_field]),
        )
        return [d.meta[self.meta_field] for d in docs_with_meta_field]

    parse_fn: Callable
    if meta_value_type == "float":
        parse_fn = float
    elif meta_value_type == "int":
        parse_fn = int
    else:
        # meta_value_type == "date": dateutil's parser handles common date string formats.
        parse_fn = date_parse

    try:
        meta_values = [parse_fn(d.meta[self.meta_field]) for d in docs_with_meta_field]
    except ValueError as error:
        # Any single unparsable value aborts parsing for the whole batch so the ordering stays consistent.
        logger.warning(
            "Tried to parse the meta values of Documents with IDs {document_ids}, but got ValueError with the "
            "message: {error}\n"
            "Skipping parsing of the meta values.",
            document_ids=",".join([doc.id for doc in docs_with_meta_field]),
            error=error,
        )
        meta_values = [d.meta[self.meta_field] for d in docs_with_meta_field]

    return meta_values
363
+
364
def _merge_rankings(
    self,
    documents: List[Document],
    sorted_documents: List[Document],
    weight: float,
    ranking_mode: Literal["reciprocal_rank_fusion", "linear_score"],
) -> List[Document]:
    """
    Combine the incoming ranking (`documents`) with the meta-field ranking (`sorted_documents`).

    Each Document accumulates a weighted contribution from its position in both lists; the Documents
    are then re-sorted by the combined score (stored back onto `Document.score`) and returned.
    """
    combined_scores: Dict = defaultdict(int)

    if ranking_mode == "reciprocal_rank_fusion":
        for rank, (original_doc, meta_doc) in enumerate(zip(documents, sorted_documents)):
            # Both lists contribute an RRF term for the same rank position, split by `weight`.
            rrf_score = self._calculate_rrf(rank=rank)
            combined_scores[original_doc.id] += rrf_score * (1 - weight)
            combined_scores[meta_doc.id] += rrf_score * weight
    elif ranking_mode == "linear_score":
        total = len(sorted_documents)
        for rank, (original_doc, meta_doc) in enumerate(zip(documents, sorted_documents)):
            # Scores outside [0,1] (or missing) are treated as 0 so they cannot dominate the blend.
            previous_score = 0.0
            if original_doc.score is None:
                logger.warning("The score wasn't provided; defaulting to 0.")
            elif original_doc.score < 0 or original_doc.score > 1:
                logger.warning(
                    "The score {score} for Document {document_id} is outside the [0,1] range; defaulting to 0",
                    score=original_doc.score,
                    document_id=original_doc.id,
                )
            else:
                previous_score = original_doc.score

            combined_scores[original_doc.id] += previous_score * (1 - weight)
            combined_scores[meta_doc.id] += self._calc_linear_score(rank=rank, amount=total) * weight

    # Write the combined score back onto each Document, then order by it (highest first).
    for original_doc in documents:
        original_doc.score = combined_scores[original_doc.id]

    return sorted(documents, key=lambda doc: doc.score if doc.score else -1, reverse=True)
402
+
403
+ @staticmethod
404
+ def _calculate_rrf(rank: int, k: int = 61) -> float:
405
+ """
406
+ Calculates the reciprocal rank fusion.
407
+
408
+ The constant K is set to 61 (60 was suggested by the original paper, plus 1 as python lists are 0-based and
409
+ the [paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) used 1-based ranking).
410
+ """
411
+ return 1 / (k + rank)
412
+
413
+ @staticmethod
414
+ def _calc_linear_score(rank: int, amount: int) -> float:
415
+ """
416
+ Calculate the meta field score as a linear score between the greatest and the lowest score in the list.
417
+
418
+ This linear scaling is useful for:
419
+ - Reducing the effect of outliers
420
+ - Creating scores that are meaningfully distributed in the range [0,1],
421
+ similar to scores coming from a Retriever or Ranker.
422
+ """
423
+ return (amount - rank) / amount
testbed/deepset-ai__haystack/haystack/components/rankers/sentence_transformers_diversity.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any, Dict, List, Literal, Optional
6
+
7
+ from haystack import Document, component, default_from_dict, default_to_dict, logging
8
+ from haystack.lazy_imports import LazyImport
9
+ from haystack.utils import ComponentDevice, Secret, deserialize_secrets_inplace
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ with LazyImport(message="Run 'pip install \"sentence-transformers>=3.0.0\"'") as torch_and_sentence_transformers_import:
15
+ import torch
16
+ from sentence_transformers import SentenceTransformer
17
+
18
+
19
@component
class SentenceTransformersDiversityRanker:
    """
    A Diversity Ranker based on Sentence Transformers.

    Implements a document ranking algorithm that orders documents in such a way as to maximize the overall diversity
    of the documents.

    This component provides functionality to rank a list of documents based on their similarity with respect to the
    query to maximize the overall diversity. It uses a pre-trained Sentence Transformers model to embed the query and
    the Documents.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.rankers import SentenceTransformersDiversityRanker

    ranker = SentenceTransformersDiversityRanker(model="sentence-transformers/all-MiniLM-L6-v2", similarity="cosine")
    ranker.warm_up()

    docs = [Document(content="Paris"), Document(content="Berlin")]
    query = "What is the capital of germany?"
    output = ranker.run(query=query, documents=docs)
    docs = output["documents"]
    ```
    """

    def __init__(
        self,
        model: str = "sentence-transformers/all-MiniLM-L6-v2",
        top_k: int = 10,
        device: Optional[ComponentDevice] = None,
        token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
        similarity: Literal["dot_product", "cosine"] = "cosine",
        query_prefix: str = "",
        query_suffix: str = "",
        document_prefix: str = "",
        document_suffix: str = "",
        meta_fields_to_embed: Optional[List[str]] = None,
        embedding_separator: str = "\n",
    ):
        """
        Initialize a SentenceTransformersDiversityRanker.

        :param model: Local path or name of the model in Hugging Face's model hub,
            such as `'sentence-transformers/all-MiniLM-L6-v2'`.
        :param top_k: The maximum number of Documents to return per query.
        :param device: The device on which the model is loaded. If `None`, the default device is automatically
            selected.
        :param token: The API token used to download private models from Hugging Face.
        :param similarity: Similarity metric for comparing embeddings. Can be set to "cosine" (default) or
            "dot_product".
        :param query_prefix: A string to add to the beginning of the query text before ranking.
            Can be used to prepend the text with an instruction, as required by some embedding models,
            such as E5 and BGE.
        :param query_suffix: A string to add to the end of the query text before ranking.
        :param document_prefix: A string to add to the beginning of each Document text before ranking.
            Can be used to prepend the text with an instruction, as required by some embedding models,
            such as E5 and BGE.
        :param document_suffix: A string to add to the end of each Document text before ranking.
        :param meta_fields_to_embed: List of meta fields that should be embedded along with the Document content.
        :param embedding_separator: Separator used to concatenate the meta fields to the Document content.
        :raises ValueError: If `top_k` is not > 0 or `similarity` is not a supported metric.
        """
        torch_and_sentence_transformers_import.check()

        self.model_name_or_path = model
        if top_k is None or top_k <= 0:
            raise ValueError(f"top_k must be > 0, but got {top_k}")
        self.top_k = top_k
        self.device = ComponentDevice.resolve_device(device)
        self.token = token
        # The SentenceTransformer instance is loaded lazily in warm_up().
        self.model = None
        if similarity not in ["dot_product", "cosine"]:
            raise ValueError(f"Similarity must be one of 'dot_product' or 'cosine', but got {similarity}.")
        self.similarity = similarity
        self.query_prefix = query_prefix
        self.document_prefix = document_prefix
        self.query_suffix = query_suffix
        self.document_suffix = document_suffix
        self.meta_fields_to_embed = meta_fields_to_embed or []
        self.embedding_separator = embedding_separator

    def warm_up(self):
        """
        Initializes the component.

        Loads the Sentence Transformers model onto the resolved device. Idempotent: a second call is a no-op.
        """
        if self.model is None:
            # NOTE(review): `use_auth_token` is deprecated in newer sentence-transformers/huggingface_hub
            # releases in favor of `token` — confirm against the pinned sentence-transformers version.
            self.model = SentenceTransformer(
                model_name_or_path=self.model_name_or_path,
                device=self.device.to_torch_str(),
                use_auth_token=self.token.resolve_value() if self.token else None,
            )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            model=self.model_name_or_path,
            device=self.device.to_dict(),
            token=self.token.to_dict() if self.token else None,
            top_k=self.top_k,
            similarity=self.similarity,
            query_prefix=self.query_prefix,
            document_prefix=self.document_prefix,
            query_suffix=self.query_suffix,
            document_suffix=self.document_suffix,
            meta_fields_to_embed=self.meta_fields_to_embed,
            embedding_separator=self.embedding_separator,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SentenceTransformersDiversityRanker":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        init_params = data["init_parameters"]
        if init_params.get("device") is not None:
            init_params["device"] = ComponentDevice.from_dict(init_params["device"])
        deserialize_secrets_inplace(init_params, keys=["token"])
        return default_from_dict(cls, data)

    def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
        """
        Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.

        Only meta fields listed in `self.meta_fields_to_embed` that are present and truthy are included;
        each text is wrapped with `document_prefix`/`document_suffix`.
        """
        texts_to_embed = []
        for doc in documents:
            meta_values_to_embed = [
                str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key]
            ]
            text_to_embed = (
                self.document_prefix
                + self.embedding_separator.join(meta_values_to_embed + [doc.content or ""])
                + self.document_suffix
            )
            texts_to_embed.append(text_to_embed)

        return texts_to_embed

    def _greedy_diversity_order(self, query: str, documents: List[Document]) -> List[Document]:
        """
        Orders the given list of documents to maximize diversity.

        The algorithm first calculates embeddings for each document and the query. It starts by selecting the document
        that is semantically closest to the query. Then, for each remaining document, it selects the one that, on
        average, is least similar to the already selected documents. This process continues until all documents are
        selected, resulting in a list where each subsequent document contributes the most to the overall diversity of
        the selected set.

        :param query: The search query.
        :param documents: The list of Document objects to be ranked.

        :return: A list of documents ordered to maximize diversity.
        """
        texts_to_embed = self._prepare_texts_to_embed(documents)

        # Calculate embeddings
        doc_embeddings = self.model.encode(texts_to_embed, convert_to_tensor=True)  # type: ignore[attr-defined]
        query_embedding = self.model.encode([self.query_prefix + query + self.query_suffix], convert_to_tensor=True)  # type: ignore[attr-defined]

        # Normalize embeddings to unit length for computing cosine similarity
        # NOTE(review): a zero-norm embedding would produce NaNs here — assumed not to occur in practice.
        if self.similarity == "cosine":
            doc_embeddings /= torch.norm(doc_embeddings, p=2, dim=-1).unsqueeze(-1)
            query_embedding /= torch.norm(query_embedding, p=2, dim=-1).unsqueeze(-1)

        n = len(documents)
        selected: List[int] = []

        # Compute the similarity vector between the query and documents
        query_doc_sim = query_embedding @ doc_embeddings.T

        # Start with the document with the highest similarity to the query
        selected.append(int(torch.argmax(query_doc_sim).item()))

        selected_sum = doc_embeddings[selected[0]] / n

        while len(selected) < n:
            # Compute mean of dot products of all selected documents and all other documents
            similarities = selected_sum @ doc_embeddings.T
            # Mask documents that are already selected
            similarities[selected] = torch.inf
            # Select the document with the lowest total similarity score
            index_unselected = int(torch.argmin(similarities).item())
            selected.append(index_unselected)
            # It's enough just to add to the selected vectors because dot product is distributive
            # It's divided by n for numerical stability
            selected_sum += doc_embeddings[index_unselected] / n

        ranked_docs: List[Document] = [documents[i] for i in selected]

        return ranked_docs

    @component.output_types(documents=List[Document])
    def run(self, query: str, documents: List[Document], top_k: Optional[int] = None):
        """
        Rank the documents based on their diversity.

        :param query: The search query.
        :param documents: List of Document objects to be ranked.
        :param top_k: Optional. An integer to override the top_k set during initialization.

        :returns: A dictionary with the following key:
            - `documents`: List of Document objects that have been selected based on the diversity ranking.

        :raises ValueError: If the top_k value is less than or equal to 0.
        :raises RuntimeError: If the component has not been warmed up.
        """
        if self.model is None:
            error_msg = (
                "The component SentenceTransformersDiversityRanker wasn't warmed up. "
                "Run 'warm_up()' before calling 'run()'."
            )
            raise RuntimeError(error_msg)

        if not documents:
            return {"documents": []}

        if top_k is None:
            top_k = self.top_k
        elif top_k <= 0:
            raise ValueError(f"top_k must be > 0, but got {top_k}")

        diversity_sorted = self._greedy_diversity_order(query=query, documents=documents)

        return {"documents": diversity_sorted[:top_k]}
testbed/deepset-ai__haystack/haystack/components/rankers/transformers_similarity.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from pathlib import Path
6
+ from typing import Any, Dict, List, Optional, Union
7
+
8
+ from haystack import Document, component, default_from_dict, default_to_dict, logging
9
+ from haystack.lazy_imports import LazyImport
10
+ from haystack.utils import ComponentDevice, DeviceMap, Secret, deserialize_secrets_inplace
11
+ from haystack.utils.hf import deserialize_hf_model_kwargs, resolve_hf_device_map, serialize_hf_model_kwargs
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ with LazyImport(message="Run 'pip install transformers[torch,sentencepiece]'") as torch_and_transformers_import:
17
+ import accelerate # pylint: disable=unused-import # the library is used but not directly referenced
18
+ import torch
19
+ from torch.utils.data import DataLoader, Dataset
20
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
21
+
22
+
23
+ @component
24
+ class TransformersSimilarityRanker:
25
+ """
26
+ Ranks documents based on their semantic similarity to the query.
27
+
28
+ It uses a pre-trained cross-encoder model from Hugging Face to embed the query and the documents.
29
+
30
+ ### Usage example
31
+
32
+ ```python
33
+ from haystack import Document
34
+ from haystack.components.rankers import TransformersSimilarityRanker
35
+
36
+ ranker = TransformersSimilarityRanker()
37
+ docs = [Document(content="Paris"), Document(content="Berlin")]
38
+ query = "City in Germany"
39
+ ranker.warm_up()
40
+ result = ranker.run(query=query, documents=docs)
41
+ docs = result["documents"]
42
+ print(docs[0].content)
43
+ ```
44
+ """
45
+
46
def __init__(  # noqa: PLR0913
    self,
    model: Union[str, Path] = "cross-encoder/ms-marco-MiniLM-L-6-v2",
    device: Optional[ComponentDevice] = None,
    token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
    top_k: int = 10,
    query_prefix: str = "",
    document_prefix: str = "",
    meta_fields_to_embed: Optional[List[str]] = None,
    embedding_separator: str = "\n",
    scale_score: bool = True,
    calibration_factor: Optional[float] = 1.0,
    score_threshold: Optional[float] = None,
    model_kwargs: Optional[Dict[str, Any]] = None,
    tokenizer_kwargs: Optional[Dict[str, Any]] = None,
    batch_size: int = 16,
):
    """
    Creates an instance of TransformersSimilarityRanker.

    :param model:
        The ranking model. Pass a local path or the Hugging Face model name of a cross-encoder model.
    :param device:
        The device on which the model is loaded. If `None`, the default device is automatically selected.
    :param token:
        The API token to download private models from Hugging Face.
    :param top_k:
        The maximum number of documents to return per query.
    :param query_prefix:
        A string to add at the beginning of the query text before ranking.
        Use it to prepend the text with an instruction, as required by reranking models like `bge`.
    :param document_prefix:
        A string to add at the beginning of each document before ranking. You can use it to prepend the document
        with an instruction, as required by embedding models like `bge`.
    :param meta_fields_to_embed:
        List of metadata fields to embed with the document.
    :param embedding_separator:
        Separator to concatenate metadata fields to the document.
    :param scale_score:
        If `True`, scales the raw logit predictions using a Sigmoid activation function.
        If `False`, disables scaling of the raw logit predictions.
    :param calibration_factor:
        Use this factor to calibrate probabilities with `sigmoid(logits * calibration_factor)`.
        Used only if `scale_score` is `True`.
    :param score_threshold:
        Use it to return documents with a score above this threshold only.
    :param model_kwargs:
        Additional keyword arguments for `AutoModelForSequenceClassification.from_pretrained`
        when loading the model. Refer to specific model documentation for available kwargs.
    :param tokenizer_kwargs:
        Additional keyword arguments for `AutoTokenizer.from_pretrained` when loading the tokenizer.
        Refer to specific model documentation for available kwargs.
    :param batch_size:
        The batch size to use for inference. The higher the batch size, the more memory is required.
        If you run into memory issues, reduce the batch size.

    :raises ValueError:
        If `top_k` is not > 0.
        If `scale_score` is True and `calibration_factor` is not provided.
    """
    torch_and_transformers_import.check()

    self.model_name_or_path = str(model)
    # model, tokenizer, and device are populated lazily in warm_up().
    self.model = None
    self.query_prefix = query_prefix
    self.document_prefix = document_prefix
    self.tokenizer = None
    self.device = None
    self.top_k = top_k
    self.token = token
    self.meta_fields_to_embed = meta_fields_to_embed or []
    self.embedding_separator = embedding_separator
    self.scale_score = scale_score
    self.calibration_factor = calibration_factor
    self.score_threshold = score_threshold

    # The device preference is folded into model_kwargs (HF `device_map`) rather than stored directly.
    model_kwargs = resolve_hf_device_map(device=device, model_kwargs=model_kwargs)
    self.model_kwargs = model_kwargs
    self.tokenizer_kwargs = tokenizer_kwargs or {}
    self.batch_size = batch_size

    # Parameter validation
    if self.scale_score and self.calibration_factor is None:
        raise ValueError(
            f"scale_score is True so calibration_factor must be provided, but got {calibration_factor}"
        )

    if self.top_k <= 0:
        raise ValueError(f"top_k must be > 0, but got {top_k}")
135
+
136
+ def _get_telemetry_data(self) -> Dict[str, Any]:
137
+ """
138
+ Data that is sent to Posthog for usage analytics.
139
+ """
140
+ return {"model": self.model_name_or_path}
141
+
142
def warm_up(self):
    """
    Initializes the component.

    Loads the cross-encoder model and its tokenizer and records the device map the model was
    placed on. Idempotent: if the model is already loaded, this is a no-op.
    """
    if self.model is not None:
        return

    self.model = AutoModelForSequenceClassification.from_pretrained(
        self.model_name_or_path, token=self.token.resolve_value() if self.token else None, **self.model_kwargs
    )
    self.tokenizer = AutoTokenizer.from_pretrained(
        self.model_name_or_path,
        token=self.token.resolve_value() if self.token else None,
        **self.tokenizer_kwargs,
    )
    # The effective device(s) are only known after loading, via the model's HF device map.
    self.device = ComponentDevice.from_multiple(device_map=DeviceMap.from_hf(self.model.hf_device_map))
156
+
157
def to_dict(self) -> Dict[str, Any]:
    """
    Serializes the component to a dictionary.

    :returns:
        Dictionary with serialized data.
    """
    data = default_to_dict(
        self,
        device=None,
        model=self.model_name_or_path,
        token=self.token.to_dict() if self.token else None,
        top_k=self.top_k,
        query_prefix=self.query_prefix,
        document_prefix=self.document_prefix,
        meta_fields_to_embed=self.meta_fields_to_embed,
        embedding_separator=self.embedding_separator,
        scale_score=self.scale_score,
        calibration_factor=self.calibration_factor,
        score_threshold=self.score_threshold,
        model_kwargs=self.model_kwargs,
        tokenizer_kwargs=self.tokenizer_kwargs,
    )
    # model_kwargs may hold values that are not directly serializable (e.g. torch dtypes);
    # convert them in place before returning.
    serialize_hf_model_kwargs(data["init_parameters"]["model_kwargs"])
    return data
183
+
184
+ @classmethod
185
+ def from_dict(cls, data: Dict[str, Any]) -> "TransformersSimilarityRanker":
186
+ """
187
+ Deserializes the component from a dictionary.
188
+
189
+ :param data:
190
+ Dictionary to deserialize from.
191
+ :returns:
192
+ Deserialized component.
193
+ """
194
+ init_params = data["init_parameters"]
195
+ if init_params.get("device") is not None:
196
+ init_params["device"] = ComponentDevice.from_dict(init_params["device"])
197
+ if init_params.get("model_kwargs") is not None:
198
+ deserialize_hf_model_kwargs(init_params["model_kwargs"])
199
+ deserialize_secrets_inplace(init_params, keys=["token"])
200
+
201
+ return default_from_dict(cls, data)
202
+
203
+ @component.output_types(documents=List[Document])
204
+ def run(
205
+ self,
206
+ query: str,
207
+ documents: List[Document],
208
+ top_k: Optional[int] = None,
209
+ scale_score: Optional[bool] = None,
210
+ calibration_factor: Optional[float] = None,
211
+ score_threshold: Optional[float] = None,
212
+ ):
213
+ """
214
+ Returns a list of documents ranked by their similarity to the given query.
215
+
216
+ :param query:
217
+ The input query to compare the documents to.
218
+ :param documents:
219
+ A list of documents to be ranked.
220
+ :param top_k:
221
+ The maximum number of documents to return.
222
+ :param scale_score:
223
+ If `True`, scales the raw logit predictions using a Sigmoid activation function.
224
+ If `False`, disables scaling of the raw logit predictions.
225
+ :param calibration_factor:
226
+ Use this factor to calibrate probabilities with `sigmoid(logits * calibration_factor)`.
227
+ Used only if `scale_score` is `True`.
228
+ :param score_threshold:
229
+ Use it to return documents only with a score above this threshold.
230
+ :returns:
231
+ A dictionary with the following keys:
232
+ - `documents`: A list of documents closest to the query, sorted from most similar to least similar.
233
+
234
+ :raises ValueError:
235
+ If `top_k` is not > 0.
236
+ If `scale_score` is True and `calibration_factor` is not provided.
237
+ :raises RuntimeError:
238
+ If the model is not loaded because `warm_up()` was not called before.
239
+ """
240
+ # If a model path is provided but the model isn't loaded
241
+ if self.model is None:
242
+ raise RuntimeError(
243
+ "The component TransformersSimilarityRanker wasn't warmed up. Run 'warm_up()' before calling 'run()'."
244
+ )
245
+
246
+ if not documents:
247
+ return {"documents": []}
248
+
249
+ top_k = top_k or self.top_k
250
+ scale_score = scale_score or self.scale_score
251
+ calibration_factor = calibration_factor or self.calibration_factor
252
+ score_threshold = score_threshold or self.score_threshold
253
+
254
+ if top_k <= 0:
255
+ raise ValueError(f"top_k must be > 0, but got {top_k}")
256
+
257
+ if scale_score and calibration_factor is None:
258
+ raise ValueError(
259
+ f"scale_score is True so calibration_factor must be provided, but got {calibration_factor}"
260
+ )
261
+
262
+ query_doc_pairs = []
263
+ for doc in documents:
264
+ meta_values_to_embed = [
265
+ str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key]
266
+ ]
267
+ text_to_embed = self.embedding_separator.join(meta_values_to_embed + [doc.content or ""])
268
+ query_doc_pairs.append([self.query_prefix + query, self.document_prefix + text_to_embed])
269
+
270
+ class _Dataset(Dataset):
271
+ def __init__(self, batch_encoding):
272
+ self.batch_encoding = batch_encoding
273
+
274
+ def __len__(self):
275
+ return len(self.batch_encoding["input_ids"])
276
+
277
+ def __getitem__(self, item):
278
+ return {key: self.batch_encoding.data[key][item] for key in self.batch_encoding.data.keys()}
279
+
280
+ batch_enc = self.tokenizer(query_doc_pairs, padding=True, truncation=True, return_tensors="pt").to( # type: ignore
281
+ self.device.first_device.to_torch()
282
+ )
283
+ dataset = _Dataset(batch_enc)
284
+ inp_dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False)
285
+
286
+ similarity_scores = []
287
+ with torch.inference_mode():
288
+ for features in inp_dataloader:
289
+ model_preds = self.model(**features).logits.squeeze(dim=1) # type: ignore
290
+ similarity_scores.extend(model_preds)
291
+ similarity_scores = torch.stack(similarity_scores)
292
+
293
+ if scale_score:
294
+ similarity_scores = torch.sigmoid(similarity_scores * calibration_factor)
295
+
296
+ _, sorted_indices = torch.sort(similarity_scores, descending=True)
297
+
298
+ sorted_indices = sorted_indices.cpu().tolist() # type: ignore
299
+ similarity_scores = similarity_scores.cpu().tolist()
300
+ ranked_docs = []
301
+ for sorted_index in sorted_indices:
302
+ i = sorted_index
303
+ documents[i].score = similarity_scores[i]
304
+ ranked_docs.append(documents[i])
305
+
306
+ if score_threshold is not None:
307
+ ranked_docs = [doc for doc in ranked_docs if doc.score >= score_threshold]
308
+
309
+ return {"documents": ranked_docs[:top_k]}
testbed/deepset-ai__haystack/haystack/components/readers/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from haystack.components.readers.extractive import ExtractiveReader
6
+
7
+ __all__ = ["ExtractiveReader"]
testbed/deepset-ai__haystack/haystack/components/retrievers/filter_retriever.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from haystack import Document, component, default_from_dict, default_to_dict, logging
8
+ from haystack.document_stores.types import DocumentStore
9
+ from haystack.utils import deserialize_document_store_in_init_params_inplace
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
@component
class FilterRetriever:
    """
    Retrieves documents that match the provided filters.

    ### Usage example

    ```python
    from haystack import Document
    from haystack.components.retrievers import FilterRetriever
    from haystack.document_stores.in_memory import InMemoryDocumentStore

    docs = [
        Document(content="Python is a popular programming language", meta={"lang": "en"}),
        Document(content="python ist eine beliebte Programmiersprache", meta={"lang": "de"}),
    ]

    doc_store = InMemoryDocumentStore()
    doc_store.write_documents(docs)
    retriever = FilterRetriever(doc_store, filters={"field": "lang", "operator": "==", "value": "en"})

    # if passed in the run method, filters override those provided at initialization
    result = retriever.run(filters={"field": "lang", "operator": "==", "value": "de"})

    print(result["documents"])
    ```
    """

    def __init__(self, document_store: DocumentStore, filters: Optional[Dict[str, Any]] = None):
        """
        Create the FilterRetriever component.

        :param document_store: An instance of a Document Store to use with the Retriever.
        :param filters: A dictionary with filters to narrow down the search space.
        """
        self.document_store = document_store
        self.filters = filters

    def _get_telemetry_data(self) -> Dict[str, Any]:
        """Return the payload sent to Posthog for usage analytics."""
        # Only the document store's class name is reported, never its contents.
        return {"document_store": type(self.document_store).__name__}

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            document_store=self.document_store.to_dict(),
            filters=self.filters,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "FilterRetriever":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        # Rebuild the nested document store object before the component itself.
        deserialize_document_store_in_init_params_inplace(data)
        return default_from_dict(cls, data)

    @component.output_types(documents=List[Document])
    def run(self, filters: Optional[Dict[str, Any]] = None):
        """
        Run the FilterRetriever on the given input data.

        :param filters:
            A dictionary with filters to narrow down the search space.
            If not specified, the FilterRetriever uses the values provided at initialization.
        :returns:
            A list of retrieved documents.
        """
        # Runtime filters take precedence over those set at construction time.
        effective_filters = filters or self.filters
        matching = self.document_store.filter_documents(filters=effective_filters)
        return {"documents": matching}
testbed/deepset-ai__haystack/haystack/components/retrievers/in_memory/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from haystack.components.retrievers.in_memory.bm25_retriever import InMemoryBM25Retriever
6
+ from haystack.components.retrievers.in_memory.embedding_retriever import InMemoryEmbeddingRetriever
7
+
8
+ __all__ = ["InMemoryBM25Retriever", "InMemoryEmbeddingRetriever"]
testbed/deepset-ai__haystack/haystack/components/routers/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from haystack.components.routers.conditional_router import ConditionalRouter
6
+ from haystack.components.routers.file_type_router import FileTypeRouter
7
+ from haystack.components.routers.metadata_router import MetadataRouter
8
+ from haystack.components.routers.text_language_router import TextLanguageRouter
9
+ from haystack.components.routers.transformers_text_router import TransformersTextRouter
10
+ from haystack.components.routers.zero_shot_text_router import TransformersZeroShotTextRouter
11
+
12
+ __all__ = [
13
+ "FileTypeRouter",
14
+ "MetadataRouter",
15
+ "TextLanguageRouter",
16
+ "ConditionalRouter",
17
+ "TransformersZeroShotTextRouter",
18
+ "TransformersTextRouter",
19
+ ]
testbed/deepset-ai__haystack/haystack/components/routers/conditional_router.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ import ast
6
+ import contextlib
7
+ from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Set, Union, get_args, get_origin
8
+ from warnings import warn
9
+
10
+ from jinja2 import Environment, TemplateSyntaxError, meta
11
+ from jinja2.nativetypes import NativeEnvironment
12
+ from jinja2.sandbox import SandboxedEnvironment
13
+
14
+ from haystack import component, default_from_dict, default_to_dict, logging
15
+ from haystack.utils import deserialize_callable, deserialize_type, serialize_callable, serialize_type
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class NoRouteSelectedException(Exception):
    """Raised by ConditionalRouter when none of the configured routes' conditions evaluates to True."""
22
+
23
+
24
class RouteConditionException(Exception):
    """Raised by ConditionalRouter when a route's condition expression cannot be parsed or evaluated."""
26
+
27
+
28
@component
class ConditionalRouter:
    """
    Routes data based on specific conditions.

    You define these conditions in a list of dictionaries called `routes`.
    Each dictionary in this list represents a single route. Each route has these four elements:
    - `condition`: A Jinja2 string expression that determines if the route is selected.
    - `output`: A Jinja2 expression defining the route's output value.
    - `output_type`: The type of the output data (for example, `str`, `List[int]`).
    - `output_name`: The name you want to use to publish `output`. This name is used to connect
    the router to other components in the pipeline.

    ### Usage example

    ```python
    from typing import List
    from haystack.components.routers import ConditionalRouter

    routes = [
        {
            "condition": "{{streams|length > 2}}",
            "output": "{{streams}}",
            "output_name": "enough_streams",
            "output_type": List[int],
        },
        {
            "condition": "{{streams|length <= 2}}",
            "output": "{{streams}}",
            "output_name": "insufficient_streams",
            "output_type": List[int],
        },
    ]
    router = ConditionalRouter(routes)
    # When 'streams' has more than 2 items, 'enough_streams' output will activate, emitting the list [1, 2, 3]
    kwargs = {"streams": [1, 2, 3], "query": "Haystack"}
    result = router.run(**kwargs)
    assert result == {"enough_streams": [1, 2, 3]}
    ```

    In this example, we configure two routes. The first route sends the 'streams' value to 'enough_streams' if the
    stream count exceeds two. The second route directs 'streams' to 'insufficient_streams' if there
    are two or fewer streams.

    In the pipeline setup, the Router connects to other components using the output names. For example,
    'enough_streams' might connect to a component that processes streams, while
    'insufficient_streams' might connect to a component that fetches more streams.


    Here is a pipeline that uses `ConditionalRouter` and routes the fetched `ByteStreams` to
    different components depending on the number of streams fetched:

    ```python
    from typing import List
    from haystack import Pipeline
    from haystack.dataclasses import ByteStream
    from haystack.components.routers import ConditionalRouter

    routes = [
        {
            "condition": "{{streams|length > 2}}",
            "output": "{{streams}}",
            "output_name": "enough_streams",
            "output_type": List[ByteStream],
        },
        {
            "condition": "{{streams|length <= 2}}",
            "output": "{{streams}}",
            "output_name": "insufficient_streams",
            "output_type": List[ByteStream],
        },
    ]

    pipe = Pipeline()
    pipe.add_component("router", router)
    ...
    pipe.connect("router.enough_streams", "some_component_a.streams")
    pipe.connect("router.insufficient_streams", "some_component_b.streams_or_some_other_input")
    ...
    ```
    """

    def __init__(
        self,
        routes: List[Dict],
        custom_filters: Optional[Dict[str, Callable]] = None,
        unsafe: bool = False,
        validate_output_type: bool = False,
    ):
        """
        Initializes the `ConditionalRouter` with a list of routes detailing the conditions for routing.

        :param routes: A list of dictionaries, each defining a route.
            Each route has these four elements:
            - `condition`: A Jinja2 string expression that determines if the route is selected.
            - `output`: A Jinja2 expression defining the route's output value.
            - `output_type`: The type of the output data (for example, `str`, `List[int]`).
            - `output_name`: The name you want to use to publish `output`. This name is used to connect
            the router to other components in the pipeline.
        :param custom_filters: A dictionary of custom Jinja2 filters used in the condition expressions.
            For example, passing `{"my_filter": my_filter_fcn}` where:
            - `my_filter` is the name of the custom filter.
            - `my_filter_fcn` is a callable that takes `my_var:str` and returns `my_var[:3]`.
            `{{ my_var|my_filter }}` can then be used inside a route condition expression:
            `"condition": "{{ my_var|my_filter == 'foo' }}"`.
        :param unsafe:
            Enable execution of arbitrary code in the Jinja template.
            This should only be used if you trust the source of the template as it can lead to remote code execution.
        :param validate_output_type:
            Enable validation of routes' output.
            If a route output doesn't match the declared type, a ValueError is raised at run time.
        """
        self.routes: List[dict] = routes
        self.custom_filters = custom_filters or {}
        self._unsafe = unsafe
        self._validate_output_type = validate_output_type

        # Create a Jinja environment to inspect variables in the condition templates
        if self._unsafe:
            msg = (
                "Unsafe mode is enabled. This allows execution of arbitrary code in the Jinja template. "
                "Use this only if you trust the source of the template."
            )
            warn(msg)

        # Sandboxed by default; NativeEnvironment (unsafe) returns native Python
        # objects from templates instead of strings.
        self._env = NativeEnvironment() if self._unsafe else SandboxedEnvironment()
        self._env.filters.update(self.custom_filters)

        self._validate_routes(routes)
        # Inspect the routes to determine input and output types.
        input_types: Set[str] = set()  # let's just store the name, type will always be Any
        output_types: Dict[str, str] = {}

        for route in routes:
            # extract inputs
            route_input_names = self._extract_variables(self._env, [route["output"], route["condition"]])
            input_types.update(route_input_names)

            # extract outputs
            output_types.update({route["output_name"]: route["output_type"]})

        component.set_input_types(self, **{var: Any for var in input_types})
        component.set_output_types(self, **output_types)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        for route in self.routes:
            # output_type needs to be serialized to a string
            route["output_type"] = serialize_type(route["output_type"])
        se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}
        return default_to_dict(
            self,
            routes=self.routes,
            custom_filters=se_filters,
            unsafe=self._unsafe,
            validate_output_type=self._validate_output_type,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ConditionalRouter":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        init_params = data.get("init_parameters", {})
        routes = init_params.get("routes")
        for route in routes:
            # output_type needs to be deserialized from a string to a type
            route["output_type"] = deserialize_type(route["output_type"])

        # Since the custom_filters are typed as optional in the init signature, we catch the
        # case where they are not present in the serialized data and set them to an empty dict.
        custom_filters = init_params.get("custom_filters", {})
        if custom_filters is not None:
            for name, filter_func in custom_filters.items():
                init_params["custom_filters"][name] = deserialize_callable(filter_func) if filter_func else None
        return default_from_dict(cls, data)

    def run(self, **kwargs):
        """
        Executes the routing logic.

        Executes the routing logic by evaluating the specified boolean condition expressions for each route in the
        order they are listed. The method directs the flow of data to the output specified in the first route whose
        `condition` is True.

        :param kwargs: All variables used in the `condition` expressed in the routes. When the component is used in a
            pipeline, these variables are passed from the previous component's output.

        :returns: A dictionary where the key is the `output_name` of the selected route and the value is the `output`
            of the selected route.

        :raises NoRouteSelectedException:
            If no `condition` in the routes is `True`.
        :raises RouteConditionException:
            If there is an error parsing or evaluating the `condition` expression in the routes.
        :raises ValueError:
            If type validation is enabled and route type doesn't match actual value type.
        """
        # Create a Jinja native environment to evaluate the condition templates as Python expressions
        for route in self.routes:
            try:
                t = self._env.from_string(route["condition"])
                rendered = t.render(**kwargs)
                if not self._unsafe:
                    # The sandboxed environment renders to strings; turn the
                    # rendered text back into a Python value safely.
                    rendered = ast.literal_eval(rendered)
                if not rendered:
                    continue
                # We now evaluate the `output` expression to determine the route output
                t_output = self._env.from_string(route["output"])
                output = t_output.render(**kwargs)
                # We suppress the exception in case the output is already a string, otherwise
                # we try to evaluate it and would fail.
                # This must be done cause the output could be different literal structures.
                # This doesn't support any user types.
                with contextlib.suppress(Exception):
                    if not self._unsafe:
                        output = ast.literal_eval(output)
            except Exception as e:
                msg = f"Error evaluating condition for route '{route}': {e}"
                raise RouteConditionException(msg) from e

            if self._validate_output_type and not self._output_matches_type(output, route["output_type"]):
                msg = f"""Route '{route["output_name"]}' type doesn't match expected type"""
                raise ValueError(msg)

            # and return the output as a dictionary under the output_name key
            return {route["output_name"]: output}

        raise NoRouteSelectedException(f"No route fired. Routes: {self.routes}")

    def _validate_routes(self, routes: List[Dict]):
        """
        Validates a list of routes.

        :param routes: A list of routes.
        """
        for route in routes:
            try:
                keys = set(route.keys())
            except AttributeError:
                raise ValueError(f"Route must be a dictionary, got: {route}")

            mandatory_fields = {"condition", "output", "output_type", "output_name"}
            has_all_mandatory_fields = mandatory_fields.issubset(keys)
            if not has_all_mandatory_fields:
                raise ValueError(
                    f"Route must contain 'condition', 'output', 'output_type' and 'output_name' fields: {route}"
                )
            for field in ["condition", "output"]:
                if not self._validate_template(self._env, route[field]):
                    raise ValueError(f"Invalid template for field '{field}': {route[field]}")

    def _extract_variables(self, env: Environment, templates: List[str]) -> Set[str]:
        """
        Extracts all variables from a list of Jinja template strings.

        :param env: A Jinja environment.
        :param templates: A list of Jinja template strings.
        :returns: A set of variable names.
        """
        variables = set()
        for template in templates:
            ast = env.parse(template)
            variables.update(meta.find_undeclared_variables(ast))
        return variables

    def _validate_template(self, env: Environment, template_text: str):
        """
        Validates a template string by parsing it with Jinja.

        :param env: A Jinja environment.
        :param template_text: A Jinja template string.
        :returns: `True` if the template is valid, `False` otherwise.
        """
        try:
            env.parse(template_text)
            return True
        except TemplateSyntaxError:
            return False

    def _output_matches_type(self, value: Any, expected_type: type):  # noqa: PLR0911 # pylint: disable=too-many-return-statements
        """
        Checks whether `value` type matches the `expected_type`.
        """
        # Handle Any type
        if expected_type is Any:
            return True

        # Get the origin type (List, Dict, etc) and type arguments
        origin = get_origin(expected_type)
        args = get_args(expected_type)

        # Handle basic types (int, str, etc)
        if origin is None:
            return isinstance(value, expected_type)

        # Handle Sequence types (List, Tuple, etc)
        if isinstance(origin, type) and issubclass(origin, Sequence):
            if not isinstance(value, Sequence):
                return False
            # Empty sequence is valid
            if not value:
                return True
            # Check each element against the sequence's type parameter
            return all(self._output_matches_type(item, args[0]) for item in value)

        # NOTE: a second, duplicated `if origin is None` branch used to live here;
        # it was unreachable (the identical check above already returned) and has
        # been removed.

        # Handle Mapping types (Dict, etc)
        if isinstance(origin, type) and issubclass(origin, Mapping):
            if not isinstance(value, Mapping):
                return False
            # Empty mapping is valid
            if not value:
                return True
            key_type, value_type = args
            # Check all keys and values match their respective types
            return all(
                self._output_matches_type(k, key_type) and self._output_matches_type(v, value_type)
                for k, v in value.items()
            )

        # Handle Union types (including Optional)
        if origin is Union:
            return any(self._output_matches_type(value, arg) for arg in args)

        return False
testbed/deepset-ai__haystack/haystack/components/routers/transformers_text_router.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from haystack import component, default_from_dict, default_to_dict, logging
8
+ from haystack.lazy_imports import LazyImport
9
+ from haystack.utils import ComponentDevice, Secret, deserialize_secrets_inplace
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ with LazyImport(message="Run 'pip install transformers[torch,sentencepiece]'") as torch_and_transformers_import:
15
+ from transformers import AutoConfig, pipeline
16
+
17
+ from haystack.utils.hf import ( # pylint: disable=ungrouped-imports
18
+ deserialize_hf_model_kwargs,
19
+ resolve_hf_pipeline_kwargs,
20
+ serialize_hf_model_kwargs,
21
+ )
22
+
23
+
24
@component
class TransformersTextRouter:
    """
    Routes the text strings to different connections based on a category label.

    The labels are specific to each model and can be found in its description on Hugging Face.

    ### Usage example

    ```python
    from haystack.core.pipeline import Pipeline
    from haystack.components.routers import TransformersTextRouter
    from haystack.components.builders import PromptBuilder
    from haystack.components.generators import HuggingFaceLocalGenerator

    p = Pipeline()
    p.add_component(
        instance=TransformersTextRouter(model="papluca/xlm-roberta-base-language-detection"),
        name="text_router"
    )
    p.add_component(
        instance=PromptBuilder(template="Answer the question: {{query}}\\nAnswer:"),
        name="english_prompt_builder"
    )
    p.add_component(
        instance=PromptBuilder(template="Beantworte die Frage: {{query}}\\nAntwort:"),
        name="german_prompt_builder"
    )

    p.add_component(
        instance=HuggingFaceLocalGenerator(model="DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1"),
        name="german_llm"
    )
    p.add_component(
        instance=HuggingFaceLocalGenerator(model="microsoft/Phi-3-mini-4k-instruct"),
        name="english_llm"
    )

    p.connect("text_router.en", "english_prompt_builder.query")
    p.connect("text_router.de", "german_prompt_builder.query")
    p.connect("english_prompt_builder.prompt", "english_llm.prompt")
    p.connect("german_prompt_builder.prompt", "german_llm.prompt")

    # English Example
    print(p.run({"text_router": {"text": "What is the capital of Germany?"}}))

    # German Example
    print(p.run({"text_router": {"text": "Was ist die Hauptstadt von Deutschland?"}}))
    ```
    """

    def __init__(
        self,
        model: str,
        labels: Optional[List[str]] = None,
        device: Optional[ComponentDevice] = None,
        token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
        huggingface_pipeline_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Initializes the TransformersTextRouter component.

        :param model: The name or path of a Hugging Face model for text classification.
        :param labels: The list of labels. If not provided, the component fetches the labels
            from the model configuration file hosted on the Hugging Face Hub using
            `transformers.AutoConfig.from_pretrained`.
        :param device: The device for loading the model. If `None`, automatically selects the default device.
            If a device or device map is specified in `huggingface_pipeline_kwargs`, it overrides this parameter.
        :param token: The API token used to download private models from Hugging Face.
            If `True`, uses either `HF_API_TOKEN` or `HF_TOKEN` environment variables.
            To generate these tokens, run `transformers-cli login`.
        :param huggingface_pipeline_kwargs: A dictionary of keyword arguments for initializing the Hugging Face
            text classification pipeline.
        """
        torch_and_transformers_import.check()

        self.token = token

        huggingface_pipeline_kwargs = resolve_hf_pipeline_kwargs(
            huggingface_pipeline_kwargs=huggingface_pipeline_kwargs or {},
            model=model,
            task="text-classification",
            supported_tasks=["text-classification"],
            device=device,
            token=token,
        )
        self.huggingface_pipeline_kwargs = huggingface_pipeline_kwargs

        if labels is None:
            # Fetch the label set from the model config so each label can become
            # a named output connection.
            config = AutoConfig.from_pretrained(
                huggingface_pipeline_kwargs["model"], token=huggingface_pipeline_kwargs["token"]
            )
            self.labels = list(config.label2id.keys())
        else:
            self.labels = labels
        component.set_output_types(self, **{label: str for label in self.labels})

        # The HF pipeline is created lazily in warm_up().
        self.pipeline = None

    def _get_telemetry_data(self) -> Dict[str, Any]:
        """
        Data that is sent to Posthog for usage analytics.
        """
        if isinstance(self.huggingface_pipeline_kwargs["model"], str):
            return {"model": self.huggingface_pipeline_kwargs["model"]}
        return {"model": f"[object of type {type(self.huggingface_pipeline_kwargs['model'])}]"}

    def warm_up(self):
        """
        Initializes the component.

        :raises ValueError:
            If the provided labels do not match the labels in the model configuration.
        """
        if self.pipeline is None:
            self.pipeline = pipeline(**self.huggingface_pipeline_kwargs)

        # Verify labels from the model configuration file match provided labels
        labels = set(self.pipeline.model.config.label2id.keys())
        if set(self.labels) != labels:
            raise ValueError(
                f"The provided labels do not match the labels in the model configuration file. "
                f"Provided labels: {self.labels}. Model labels: {labels}"
            )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        serialization_dict = default_to_dict(
            self,
            labels=self.labels,
            model=self.huggingface_pipeline_kwargs["model"],
            huggingface_pipeline_kwargs=self.huggingface_pipeline_kwargs,
            token=self.token.to_dict() if self.token else None,
        )

        # Never persist the raw token value inside the pipeline kwargs.
        huggingface_pipeline_kwargs = serialization_dict["init_parameters"]["huggingface_pipeline_kwargs"]
        huggingface_pipeline_kwargs.pop("token", None)

        serialize_hf_model_kwargs(huggingface_pipeline_kwargs)
        return serialization_dict

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TransformersTextRouter":
        """
        Deserializes the component from a dictionary.

        :param data:
            Dictionary to deserialize from.
        :returns:
            Deserialized component.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["token"])
        if data["init_parameters"].get("huggingface_pipeline_kwargs") is not None:
            deserialize_hf_model_kwargs(data["init_parameters"]["huggingface_pipeline_kwargs"])
        return default_from_dict(cls, data)

    def run(self, text: str) -> Dict[str, str]:
        """
        Routes the text strings to different connections based on a category label.

        :param text: A string of text to route.
        :returns:
            A dictionary with the label as key and the text as value.

        :raises TypeError:
            If the input is not a str.
        :raises RuntimeError:
            If the pipeline has not been loaded because warm_up() was not called before.
        """
        if self.pipeline is None:
            # FIX: the error message previously named a non-existent class
            # ("TextTransformersRouter"); it now matches this component's name.
            raise RuntimeError(
                "The component TransformersTextRouter wasn't warmed up. Run 'warm_up()' before calling 'run()'."
            )

        if not isinstance(text, str):
            raise TypeError("TransformersTextRouter expects a str as input.")

        # NOTE(review): `return_all_scores` is deprecated in recent transformers
        # releases in favor of `top_k`; kept here for compatibility with the
        # pinned transformers version — confirm before upgrading.
        prediction = self.pipeline([text], return_all_scores=False, function_to_apply="none")
        label = prediction[0]["label"]
        return {label: text}
testbed/deepset-ai__haystack/haystack/components/routers/zero_shot_text_router.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from haystack import component, default_from_dict, default_to_dict, logging
8
+ from haystack.lazy_imports import LazyImport
9
+ from haystack.utils import ComponentDevice, Secret, deserialize_secrets_inplace
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ with LazyImport(message="Run 'pip install transformers[torch,sentencepiece]'") as torch_and_transformers_import:
15
+ from transformers import pipeline
16
+
17
+ from haystack.utils.hf import ( # pylint: disable=ungrouped-imports
18
+ deserialize_hf_model_kwargs,
19
+ resolve_hf_pipeline_kwargs,
20
+ serialize_hf_model_kwargs,
21
+ )
22
+
23
+
24
@component
class TransformersZeroShotTextRouter:
    """
    Routes the text strings to different connections based on a category label.

    Specify the set of labels for categorization when initializing the component.

    ### Usage example

    ```python
    from haystack import Document
    from haystack.document_stores.in_memory import InMemoryDocumentStore
    from haystack.core.pipeline import Pipeline
    from haystack.components.routers import TransformersZeroShotTextRouter
    from haystack.components.embedders import SentenceTransformersTextEmbedder, SentenceTransformersDocumentEmbedder
    from haystack.components.retrievers import InMemoryEmbeddingRetriever

    document_store = InMemoryDocumentStore()
    doc_embedder = SentenceTransformersDocumentEmbedder(model="intfloat/e5-base-v2")
    doc_embedder.warm_up()
    docs = [
        Document(
            content="Germany, officially the Federal Republic of Germany, is a country in the western region of "
            "Central Europe. The nation's capital and most populous city is Berlin and its main financial centre "
            "is Frankfurt; the largest urban area is the Ruhr."
        ),
        Document(
            content="France, officially the French Republic, is a country located primarily in Western Europe. "
            "France is a unitary semi-presidential republic with its capital in Paris, the country's largest city "
            "and main cultural and commercial centre; other major urban areas include Marseille, Lyon, Toulouse, "
            "Lille, Bordeaux, Strasbourg, Nantes and Nice."
        )
    ]
    docs_with_embeddings = doc_embedder.run(docs)
    document_store.write_documents(docs_with_embeddings["documents"])

    p = Pipeline()
    p.add_component(instance=TransformersZeroShotTextRouter(labels=["passage", "query"]), name="text_router")
    p.add_component(
        instance=SentenceTransformersTextEmbedder(model="intfloat/e5-base-v2", prefix="passage: "),
        name="passage_embedder"
    )
    p.add_component(
        instance=SentenceTransformersTextEmbedder(model="intfloat/e5-base-v2", prefix="query: "),
        name="query_embedder"
    )
    p.add_component(
        instance=InMemoryEmbeddingRetriever(document_store=document_store),
        name="query_retriever"
    )
    p.add_component(
        instance=InMemoryEmbeddingRetriever(document_store=document_store),
        name="passage_retriever"
    )

    p.connect("text_router.passage", "passage_embedder.text")
    p.connect("passage_embedder.embedding", "passage_retriever.query_embedding")
    p.connect("text_router.query", "query_embedder.text")
    p.connect("query_embedder.embedding", "query_retriever.query_embedding")

    # Query Example
    p.run({"text_router": {"text": "What is the capital of Germany?"}})

    # Passage Example
    p.run({
        "text_router":{
            "text": "The United Kingdom of Great Britain and Northern Ireland, commonly known as the "\
            "United Kingdom (UK) or Britain, is a country in Northwestern Europe, off the north-western coast of "\
            "the continental mainland."
        }
    })
    ```
    """

    def __init__(
        self,
        labels: List[str],
        multi_label: bool = False,
        model: str = "MoritzLaurer/deberta-v3-base-zeroshot-v1.1-all-33",
        device: Optional[ComponentDevice] = None,
        token: Optional[Secret] = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
        huggingface_pipeline_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Initializes the TransformersZeroShotTextRouter component.

        :param labels: The list of candidate labels to use for classification.
            One output connection is created per label.
        :param multi_label:
            Indicates if multiple labels can be true.
            If `False`, label scores are normalized so their sum equals 1 for each sequence.
            If `True`, the labels are considered independent and probabilities are normalized for each candidate by
            doing a softmax of the entailment score vs. the contradiction score.
        :param model: The name or path of a Hugging Face model for zero-shot text classification.
        :param device: The device for loading the model. If `None`, automatically selects the default device.
            If a device or device map is specified in `huggingface_pipeline_kwargs`, it overrides this parameter.
        :param token: The API token used to download private models from Hugging Face.
            By default, it is read from the `HF_API_TOKEN` or `HF_TOKEN` environment variables if set.
            To generate these tokens, run `transformers-cli login`.
        :param huggingface_pipeline_kwargs: A dictionary of keyword arguments for initializing the Hugging Face
            zero shot text classification.
        """
        torch_and_transformers_import.check()

        self.token = token
        self.labels = labels
        self.multi_label = multi_label
        # One output socket per label: run() returns the input text on exactly one of them.
        component.set_output_types(self, **{label: str for label in labels})

        huggingface_pipeline_kwargs = resolve_hf_pipeline_kwargs(
            huggingface_pipeline_kwargs=huggingface_pipeline_kwargs or {},
            model=model,
            task="zero-shot-classification",
            supported_tasks=["zero-shot-classification"],
            device=device,
            token=token,
        )
        self.huggingface_pipeline_kwargs = huggingface_pipeline_kwargs
        # Lazily initialized by warm_up(); run() refuses to work until then.
        self.pipeline = None

    def _get_telemetry_data(self) -> Dict[str, Any]:
        """
        Data that is sent to Posthog for usage analytics.
        """
        if isinstance(self.huggingface_pipeline_kwargs["model"], str):
            return {"model": self.huggingface_pipeline_kwargs["model"]}
        return {"model": f"[object of type {type(self.huggingface_pipeline_kwargs['model'])}]"}

    def warm_up(self):
        """
        Initializes the component.
        """
        if self.pipeline is None:
            self.pipeline = pipeline(**self.huggingface_pipeline_kwargs)

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        serialization_dict = default_to_dict(
            self,
            labels=self.labels,
            # FIX: multi_label was previously not serialized, so a to_dict/from_dict
            # round-trip silently reset it to the default False.
            multi_label=self.multi_label,
            huggingface_pipeline_kwargs=self.huggingface_pipeline_kwargs,
            token=self.token.to_dict() if self.token else None,
        )

        # The raw token must never leak into the serialized pipeline kwargs;
        # it is stored separately (and redacted) via Secret.to_dict above.
        huggingface_pipeline_kwargs = serialization_dict["init_parameters"]["huggingface_pipeline_kwargs"]
        huggingface_pipeline_kwargs.pop("token", None)

        serialize_hf_model_kwargs(huggingface_pipeline_kwargs)
        return serialization_dict

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TransformersZeroShotTextRouter":
        """
        Deserializes the component from a dictionary.

        :param data:
            Dictionary to deserialize from.
        :returns:
            Deserialized component.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["token"])
        if data["init_parameters"].get("huggingface_pipeline_kwargs") is not None:
            deserialize_hf_model_kwargs(data["init_parameters"]["huggingface_pipeline_kwargs"])
        return default_from_dict(cls, data)

    def run(self, text: str) -> Dict[str, str]:
        """
        Routes the text strings to different connections based on a category label.

        :param text: A string of text to route.
        :returns:
            A dictionary with the label as key and the text as value.

        :raises TypeError:
            If the input is not a str.
        :raises RuntimeError:
            If the pipeline has not been loaded because warm_up() was not called before.
        """
        if self.pipeline is None:
            raise RuntimeError(
                "The component TransformersZeroShotTextRouter wasn't warmed up. Run 'warm_up()' before calling 'run()'."
            )

        if not isinstance(text, str):
            raise TypeError("TransformersZeroShotTextRouter expects a str as input.")

        prediction = self.pipeline(sequences=[text], candidate_labels=self.labels, multi_label=self.multi_label)
        # Pick the label with the highest score for this single sequence.
        predicted_scores = prediction[0]["scores"]
        max_score_index = max(range(len(predicted_scores)), key=predicted_scores.__getitem__)
        label = prediction[0]["labels"][max_score_index]
        return {label: text}
testbed/deepset-ai__haystack/haystack/components/samplers/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ from haystack.components.samplers.top_p import TopPSampler
6
+
7
+ __all__ = ["TopPSampler"]
testbed/deepset-ai__haystack/haystack/components/websearch/serper_dev.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ import json
6
+ from typing import Any, Dict, List, Optional, Union
7
+
8
+ import requests
9
+
10
+ from haystack import ComponentError, Document, component, default_from_dict, default_to_dict, logging
11
+ from haystack.utils import Secret, deserialize_secrets_inplace
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ SERPERDEV_BASE_URL = "https://google.serper.dev/search"
17
+
18
+
19
class SerperDevError(ComponentError):
    """Raised when a request to the SerperDev API fails."""
20
+
21
+
22
@component
class SerperDevWebSearch:
    """
    Uses [Serper](https://serper.dev/) to search the web for relevant documents.

    See the [Serper Dev website](https://serper.dev/) for more details.

    Usage example:
    ```python
    from haystack.components.websearch import SerperDevWebSearch
    from haystack.utils import Secret

    websearch = SerperDevWebSearch(top_k=10, api_key=Secret.from_token("test-api-key"))
    results = websearch.run(query="Who is the boyfriend of Olivia Wilde?")

    assert results["documents"]
    assert results["links"]
    ```
    """

    def __init__(
        self,
        api_key: Secret = Secret.from_env_var("SERPERDEV_API_KEY"),
        top_k: Optional[int] = 10,
        allowed_domains: Optional[List[str]] = None,
        search_params: Optional[Dict[str, Any]] = None,
    ):
        """
        Initialize the SerperDevWebSearch component.

        :param api_key: API key for the Serper API.
        :param top_k: Number of documents to return.
        :param allowed_domains: List of domains to limit the search to.
        :param search_params: Additional parameters passed to the Serper API.
            For example, you can set 'num' to 20 to increase the number of search results.
            See the [Serper website](https://serper.dev/) for more details.
        """
        self.api_key = api_key
        self.top_k = top_k
        self.allowed_domains = allowed_domains
        self.search_params = search_params or {}

        # Ensure that the API key is resolved. Fails fast at construction time
        # instead of on the first run() call.
        _ = self.api_key.resolve_value()

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            top_k=self.top_k,
            allowed_domains=self.allowed_domains,
            search_params=self.search_params,
            api_key=self.api_key.to_dict(),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SerperDevWebSearch":
        """
        Deserializes the component from a dictionary.

        :param data:
            Dictionary to deserialize from.
        :returns:
            Deserialized component.
        """
        # FIX: this docstring previously described serialization (copy-pasted from to_dict).
        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        return default_from_dict(cls, data)

    @component.output_types(documents=List[Document], links=List[str])
    def run(self, query: str) -> Dict[str, Union[List[Document], List[str]]]:
        """
        Use [Serper](https://serper.dev/) to search the web.

        :param query: Search query.
        :returns: A dictionary with the following keys:
            - "documents": List of documents returned by the search engine.
            - "links": List of links returned by the search engine.
        :raises SerperDevError: If an error occurs while querying the SerperDev API.
        :raises TimeoutError: If the request to the SerperDev API times out.
        """
        # Restrict results via Google's "site:" operator when allowed_domains is set,
        # e.g. "site:a.com OR site:b.com <query>".
        query_prepend = "OR ".join(f"site:{domain} " for domain in self.allowed_domains) if self.allowed_domains else ""

        payload = json.dumps(
            {"q": query_prepend + query, "gl": "us", "hl": "en", "autocorrect": True, **self.search_params}
        )
        headers = {"X-API-KEY": self.api_key.resolve_value(), "Content-Type": "application/json"}

        try:
            response = requests.post(SERPERDEV_BASE_URL, headers=headers, data=payload, timeout=30)  # type: ignore
            response.raise_for_status()  # Will raise an HTTPError for bad responses
        except requests.Timeout as error:
            raise TimeoutError(f"Request to {self.__class__.__name__} timed out.") from error

        except requests.RequestException as e:
            # HTTPError is a RequestException subclass, so bad status codes end up here too.
            raise SerperDevError(f"An error occurred while querying {self.__class__.__name__}. Error: {e}") from e

        # If we reached this point, it means the request was successful and we can proceed
        json_result = response.json()

        # we get the snippet from the json result and put it in the content field of the document
        organic = [
            Document(meta={k: v for k, v in d.items() if k != "snippet"}, content=d.get("snippet"))
            for d in json_result["organic"]
        ]

        # answer box is what search engine shows as a direct answer to the query
        answer_box = []
        if "answerBox" in json_result:
            answer_dict = json_result["answerBox"]
            highlighted_answers = answer_dict.get("snippetHighlighted")
            answer_box_content = None
            # Check if highlighted_answers is a list and has at least one element
            if isinstance(highlighted_answers, list) and len(highlighted_answers) > 0:
                answer_box_content = highlighted_answers[0]
            elif isinstance(highlighted_answers, str):
                answer_box_content = highlighted_answers
            if not answer_box_content:
                # Fall back to the first available field, in order of usefulness.
                for key in ["snippet", "answer", "title"]:
                    if key in answer_dict:
                        answer_box_content = answer_dict[key]
                        break
            if answer_box_content:
                answer_box = [
                    Document(
                        content=answer_box_content,
                        meta={"title": answer_dict.get("title", ""), "link": answer_dict.get("link", "")},
                    )
                ]

        # these are related questions that search engine shows
        people_also_ask = []
        if "peopleAlsoAsk" in json_result:
            for result in json_result["peopleAlsoAsk"]:
                title = result.get("title", "")
                people_also_ask.append(
                    Document(
                        content=result["snippet"] if result.get("snippet") else title,
                        meta={"title": title, "link": result.get("link", None)},
                    )
                )

        documents = answer_box + organic + people_also_ask

        links = [result["link"] for result in json_result["organic"]]

        logger.debug(
            "Serper Dev returned {number_documents} documents for the query '{query}'",
            number_documents=len(documents),
            query=query,
        )
        # top_k caps both lists; documents may exceed links because of answer box / related questions.
        return {"documents": documents[: self.top_k], "links": links[: self.top_k]}