Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- testbed/deepset-ai__haystack/haystack/__init__.py +34 -0
- testbed/deepset-ai__haystack/haystack/components/__init__.py +3 -0
- testbed/deepset-ai__haystack/haystack/components/connectors/__init__.py +7 -0
- testbed/deepset-ai__haystack/haystack/components/connectors/openapi_service.py +270 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/__init__.py +25 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/answer_exact_match.py +69 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/context_relevance.py +222 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/document_map.py +90 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/document_mrr.py +84 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/document_ndcg.py +133 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/document_recall.py +133 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/faithfulness.py +216 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/llm_evaluator.py +387 -0
- testbed/deepset-ai__haystack/haystack/components/evaluators/sas_evaluator.py +201 -0
- testbed/deepset-ai__haystack/haystack/components/extractors/__init__.py +11 -0
- testbed/deepset-ai__haystack/haystack/components/extractors/named_entity_extractor.py +485 -0
- testbed/deepset-ai__haystack/haystack/components/joiners/answer_joiner.py +172 -0
- testbed/deepset-ai__haystack/haystack/components/joiners/branch.py +141 -0
- testbed/deepset-ai__haystack/haystack/components/joiners/document_joiner.py +264 -0
- testbed/deepset-ai__haystack/haystack/components/joiners/string_joiner.py +59 -0
- testbed/deepset-ai__haystack/haystack/components/retrievers/__init__.py +10 -0
- testbed/deepset-ai__haystack/haystack/components/retrievers/sentence_window_retriever.py +197 -0
- testbed/deepset-ai__haystack/haystack/components/routers/file_type_router.py +184 -0
- testbed/deepset-ai__haystack/haystack/components/routers/text_language_router.py +102 -0
- testbed/deepset-ai__haystack/haystack/components/samplers/top_p.py +177 -0
- testbed/deepset-ai__haystack/haystack/components/validators/__init__.py +7 -0
- testbed/deepset-ai__haystack/haystack/components/validators/json_schema.py +263 -0
- testbed/deepset-ai__haystack/haystack/components/websearch/__init__.py +8 -0
- testbed/deepset-ai__haystack/haystack/components/websearch/searchapi.py +179 -0
- testbed/deepset-ai__haystack/haystack/components/writers/__init__.py +7 -0
- testbed/deepset-ai__haystack/haystack/components/writers/document_writer.py +102 -0
- testbed/deepset-ai__haystack/haystack/core/__init__.py +3 -0
- testbed/deepset-ai__haystack/haystack/core/component/__init__.py +8 -0
- testbed/deepset-ai__haystack/haystack/core/component/component.py +560 -0
- testbed/deepset-ai__haystack/haystack/core/component/sockets.py +148 -0
- testbed/deepset-ai__haystack/haystack/core/component/types.py +111 -0
- testbed/deepset-ai__haystack/haystack/core/errors.py +47 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/__init__.py +8 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/base.py +1375 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/descriptions.py +73 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/draw.py +154 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/pipeline.py +550 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/chat_with_website.yaml.jinja2 +52 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/generative_qa.yaml.jinja2 +24 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/indexing.yaml.jinja2 +67 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/rag.yaml.jinja2 +70 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/template.py +130 -0
- testbed/deepset-ai__haystack/haystack/core/pipeline/utils.py +20 -0
- testbed/deepset-ai__haystack/haystack/core/serialization.py +259 -0
- testbed/deepset-ai__haystack/haystack/core/type_utils.py +84 -0
testbed/deepset-ai__haystack/haystack/__init__.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

import haystack.logging
import haystack.tracing
from haystack.core.component import component
from haystack.core.errors import ComponentError, DeserializationError
from haystack.core.pipeline import Pipeline, PredefinedPipeline
from haystack.core.serialization import default_from_dict, default_to_dict
from haystack.dataclasses import Answer, Document, ExtractedAnswer, GeneratedAnswer

# Initialize the logging configuration
# This is a no-op unless `structlog` is installed
haystack.logging.configure_logging()

# Same for tracing (no op if `opentelemetry` or `ddtrace` is not installed)
haystack.tracing.auto_enable_tracing()

# Public API of the top-level `haystack` package: the component decorator,
# (de)serialization helpers, core errors, pipeline entry points, and the
# primary dataclasses re-exported for convenience.
__all__ = [
    "component",
    "default_from_dict",
    "default_to_dict",
    "DeserializationError",
    "ComponentError",
    "Pipeline",
    "PredefinedPipeline",
    "Document",
    "Answer",
    "GeneratedAnswer",
    "ExtractedAnswer",
]
testbed/deepset-ai__haystack/haystack/components/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
testbed/deepset-ai__haystack/haystack/components/connectors/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

# Re-export the connector class so callers can import it from the package root:
# `from haystack.components.connectors import OpenAPIServiceConnector`.
from haystack.components.connectors.openapi_service import OpenAPIServiceConnector

__all__ = ["OpenAPIServiceConnector"]
testbed/deepset-ai__haystack/haystack/components/connectors/openapi_service.py
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
from copy import copy
|
| 8 |
+
from typing import Any, Dict, List, Optional, Union
|
| 9 |
+
|
| 10 |
+
from haystack import component, logging
|
| 11 |
+
from haystack.dataclasses import ChatMessage, ChatRole
|
| 12 |
+
from haystack.lazy_imports import LazyImport
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
with LazyImport("Run 'pip install openapi3'") as openapi_imports:
|
| 17 |
+
from openapi3 import OpenAPI
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@component
class OpenAPIServiceConnector:
    """
    A component which connects the Haystack framework to OpenAPI services.

    The `OpenAPIServiceConnector` component connects the Haystack framework to OpenAPI services, enabling it to call
    operations as defined in the OpenAPI specification of the service.

    It integrates with `ChatMessage` dataclass, where the payload in messages is used to determine the method to be
    called and the parameters to be passed. The message payload should be an OpenAI JSON formatted function calling
    string consisting of the method name and the parameters to be passed to the method. The method name and parameters
    are then used to invoke the method on the OpenAPI service. The response from the service is returned as a
    `ChatMessage`.

    Before using this component, users usually resolve service endpoint parameters with a help of
    `OpenAPIServiceToFunctions` component.

    The example below demonstrates how to use the `OpenAPIServiceConnector` to invoke a method on a https://serper.dev/
    service specified via OpenAPI specification.

    Note, however, that `OpenAPIServiceConnector` is usually not meant to be used directly, but rather as part of a
    pipeline that includes the `OpenAPIServiceToFunctions` component and an `OpenAIChatGenerator` component using LLM
    with the function calling capabilities. In the example below we use the function calling payload directly, but in a
    real-world scenario, the function calling payload would usually be generated by the `OpenAIChatGenerator` component.

    Usage example:

    ```python
    import json
    import requests

    from haystack.components.connectors import OpenAPIServiceConnector
    from haystack.dataclasses import ChatMessage


    fc_payload = [{'function': {'arguments': '{"q": "Why was Sam Altman ousted from OpenAI?"}', 'name': 'search'},
                   'id': 'call_PmEBYvZ7mGrQP5PUASA5m9wO', 'type': 'function'}]

    serper_token = <your_serper_dev_token>
    serperdev_openapi_spec = json.loads(requests.get("https://bit.ly/serper_dev_spec").text)
    service_connector = OpenAPIServiceConnector()
    result = service_connector.run(messages=[ChatMessage.from_assistant(json.dumps(fc_payload))],
                                   service_openapi_spec=serperdev_openapi_spec, service_credentials=serper_token)
    print(result)

    >> {'service_response': [ChatMessage(content='{"searchParameters": {"q": "Why was Sam Altman ousted from OpenAI?",
    >> "type": "search", "engine": "google"}, "answerBox": {"snippet": "Concerns over AI safety and OpenAI\'s role
    >> in protecting were at the center of Altman\'s brief ouster from the company."...
    ```

    """

    def __init__(self):
        """
        Initializes the OpenAPIServiceConnector instance
        """
        # Raises immediately with an install hint if the optional `openapi3` package is missing,
        # so the failure happens at construction time rather than on first use.
        openapi_imports.check()

    @component.output_types(service_response=Dict[str, Any])
    def run(
        self,
        messages: List[ChatMessage],
        service_openapi_spec: Dict[str, Any],
        service_credentials: Optional[Union[dict, str]] = None,
    ) -> Dict[str, List[ChatMessage]]:
        """
        Processes a list of chat messages to invoke a method on an OpenAPI service.

        It parses the last message in the list, expecting it to contain an OpenAI function calling descriptor
        (name & parameters) in JSON format.

        :param messages: A list of `ChatMessage` objects containing the messages to be processed. The last message
        should contain the function invocation payload in OpenAI function calling format. See the example in the class
        docstring for the expected format.
        :param service_openapi_spec: The OpenAPI JSON specification object of the service to be invoked. All the refs
        should already be resolved.
        :param service_credentials: The credentials to be used for authentication with the service.
        Currently, only the http and apiKey OpenAPI security schemes are supported.

        :return: A dictionary with the following keys:
            - `service_response`: a list of `ChatMessage` objects, each containing the response from the service. The
                                  response is in JSON format, and the `content` attribute of the `ChatMessage` contains
                                  the JSON string.

        :raises ValueError: If the last message is not from the assistant or if it does not contain the correct payload
        to invoke a method on the service.
        """

        # Only the most recent message is inspected; it must carry the function-calling payload.
        last_message = messages[-1]
        if not last_message.is_from(ChatRole.ASSISTANT):
            raise ValueError(f"{last_message} is not from the assistant.")

        function_invocation_payloads = self._parse_message(last_message)

        # instantiate the OpenAPI service for the given specification
        openapi_service = OpenAPI(service_openapi_spec)
        self._authenticate_service(openapi_service, service_credentials)

        # One tool call may contain several function invocations; each produces one response message.
        response_messages = []
        for method_invocation_descriptor in function_invocation_payloads:
            service_response = self._invoke_method(openapi_service, method_invocation_descriptor)
            # openapi3 parses the JSON service response into a model object, which is not our focus at the moment.
            # Instead, we require direct access to the raw JSON data of the response, rather than the model objects
            # provided by the openapi3 library. This approach helps us avoid issues related to (de)serialization.
            # By accessing the raw JSON response through `service_response._raw_data`, we can serialize this data
            # into a string. Finally, we use this string to create a ChatMessage object.
            response_messages.append(ChatMessage.from_user(json.dumps(service_response._raw_data)))

        return {"service_response": response_messages}

    def _parse_message(self, message: ChatMessage) -> List[Dict[str, Any]]:
        """
        Parses the message to extract the method invocation descriptor.

        :param message: ChatMessage containing the tools calls
        :return: A list of function invocation payloads
        :raises ValueError: If the content is not valid JSON or lacks required fields.
        """
        function_payloads = []
        try:
            # The message content is expected to be a JSON-encoded list of OpenAI tool calls.
            tool_calls = json.loads(message.content)
        except json.JSONDecodeError:
            raise ValueError("Invalid JSON content, expected OpenAI tools message.", message.content)

        for tool_call in tool_calls:
            # this should never happen, but just in case do a sanity check
            if "type" not in tool_call:
                raise ValueError("Message payload doesn't seem to be a tool invocation descriptor", message.content)

            # In OpenAPIServiceConnector we know how to handle functions tools only
            if tool_call["type"] == "function":
                function_call = tool_call["function"]
                # `arguments` arrives as a JSON string (OpenAI format) and is decoded into a dict here.
                function_payloads.append(
                    {"arguments": json.loads(function_call["arguments"]), "name": function_call["name"]}
                )
        return function_payloads

    def _authenticate_service(self, openapi_service: OpenAPI, credentials: Optional[Union[dict, str]] = None):
        """
        Authentication with an OpenAPI service.

        Authenticates with the OpenAPI service if required, supporting both single (str) and multiple
        authentication methods (dict).

        OpenAPI spec v3 supports the following security schemes:
        http – for Basic, Bearer and other HTTP authentications schemes
        apiKey – for API keys and cookie authentication
        oauth2 – for OAuth 2
        openIdConnect – for OpenID Connect Discovery

        Currently, only the http and apiKey schemes are supported. Multiple security schemes can be defined in the
        OpenAPI spec, and the credentials should be provided as a dictionary with keys matching the security scheme
        names. If only one security scheme is defined, the credentials can be provided as a simple string.

        :param openapi_service: The OpenAPI service instance.
        :param credentials: Credentials for authentication, which can be either a string (e.g. token) or a dictionary
        with keys matching the authentication method names.
        :raises ValueError: If authentication fails, is not found, or if appropriate credentials are missing.
        """
        # Services without a `securitySchemes` section need no authentication; do nothing in that case.
        if openapi_service.raw_element.get("components", {}).get("securitySchemes"):
            service_name = openapi_service.info.title
            if not credentials:
                raise ValueError(f"Service {service_name} requires authentication but no credentials were provided.")

            # a dictionary of security schemes defined in the OpenAPI spec
            # each key is the name of the security scheme, and the value is the scheme definition
            security_schemes = openapi_service.components.securitySchemes.raw_element
            supported_schemes = ["http", "apiKey"]  # todo: add support for oauth2 and openIdConnect

            authenticated = False
            for scheme_name, scheme in security_schemes.items():
                if scheme["type"] in supported_schemes:
                    # A plain string credential is applied to whichever supported scheme comes first;
                    # a dict must key the credential by the scheme name.
                    auth_credentials = None
                    if isinstance(credentials, str):
                        auth_credentials = credentials
                    elif isinstance(credentials, dict) and scheme_name in credentials:
                        auth_credentials = credentials[scheme_name]
                    if auth_credentials:
                        openapi_service.authenticate(scheme_name, auth_credentials)
                        authenticated = True
                        break

                    # NOTE(review): this raise fires on the first supported scheme that lacks matching
                    # credentials, so any later schemes in the spec are never tried — confirm intended.
                    raise ValueError(
                        f"Service {service_name} requires {scheme_name} security scheme but no "
                        f"credentials were provided for it. Check the service configuration and credentials."
                    )
            # Reached when no scheme in the spec is of a supported type.
            if not authenticated:
                raise ValueError(
                    f"Service {service_name} requires authentication but no credentials were provided "
                    f"for it. Check the service configuration and credentials."
                )

    def _invoke_method(self, openapi_service: OpenAPI, method_invocation_descriptor: Dict[str, Any]) -> Any:
        """
        Invokes the specified method on the OpenAPI service.

        The method name and arguments are passed in the method_invocation_descriptor.

        :param openapi_service: The OpenAPI service instance.
        :param method_invocation_descriptor: The method name and arguments to be passed to the method. The payload
        should contain the method name (key: "name") and the arguments (key: "arguments"). The name is a string, and
        the arguments are a dictionary of key-value pairs.
        :return: A service JSON response.
        :raises RuntimeError: If the method is not found or invocation fails.
        """
        name = method_invocation_descriptor.get("name")
        invocation_arguments = copy(method_invocation_descriptor.get("arguments", {}))
        # NOTE(review): an operation that legitimately takes zero arguments is rejected here
        # because an empty dict is falsy — confirm this restriction is intended.
        if not name or not invocation_arguments:
            raise ValueError(
                f"Invalid function calling descriptor: {method_invocation_descriptor} . It should contain "
                f"a method name and arguments."
            )

        # openapi3 specific method to call the operation, do we have it?
        method_to_call = getattr(openapi_service, f"call_{name}", None)
        if not callable(method_to_call):
            raise RuntimeError(f"Operation {name} not found in OpenAPI specification {openapi_service.info.title}")

        # get the operation reference from the method_to_call
        operation = method_to_call.operation.__self__
        operation_dict = operation.raw_element

        # Pack URL/query parameters under "parameters" key
        method_call_params: Dict[str, Dict[str, Any]] = defaultdict(dict)
        parameters = operation_dict.get("parameters", [])
        request_body = operation_dict.get("requestBody", {})

        for param in parameters:
            param_name = param["name"]
            param_value = invocation_arguments.get(param_name)
            if param_value:
                method_call_params["parameters"][param_name] = param_value
            else:
                if param.get("required", False):
                    raise ValueError(f"Missing parameter: '{param_name}' required for the '{name}' operation.")

        # Pack request body parameters under "data" key
        if request_body:
            schema = request_body.get("content", {}).get("application/json", {}).get("schema", {})
            required_params = schema.get("required", [])
            for param_name in schema.get("properties", {}):
                param_value = invocation_arguments.get(param_name)
                if param_value:
                    method_call_params["data"][param_name] = param_value
                else:
                    if param_name in required_params:
                        raise ValueError(
                            f"Missing requestBody parameter: '{param_name}' required for the '{name}' operation."
                        )
        # call the underlying service REST API with the parameters
        return method_to_call(**method_call_params)
testbed/deepset-ai__haystack/haystack/components/evaluators/__init__.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0

# Re-export every evaluator so callers can import them from the package root,
# e.g. `from haystack.components.evaluators import SASEvaluator`.
from .answer_exact_match import AnswerExactMatchEvaluator
from .context_relevance import ContextRelevanceEvaluator
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_ndcg import DocumentNDCGEvaluator
from .document_recall import DocumentRecallEvaluator
from .faithfulness import FaithfulnessEvaluator
from .llm_evaluator import LLMEvaluator
from .sas_evaluator import SASEvaluator

__all__ = [
    "AnswerExactMatchEvaluator",
    "ContextRelevanceEvaluator",
    "DocumentMAPEvaluator",
    "DocumentMRREvaluator",
    "DocumentNDCGEvaluator",
    "DocumentRecallEvaluator",
    "FaithfulnessEvaluator",
    "LLMEvaluator",
    "SASEvaluator",
]
testbed/deepset-ai__haystack/haystack/components/evaluators/answer_exact_match.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List
|
| 6 |
+
|
| 7 |
+
from haystack.core.component import component
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@component
class AnswerExactMatchEvaluator:
    """
    An answer exact match evaluator class.

    The evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
    The result is a number from 0.0 to 1.0, it represents the proportion of predicted answers
    that matched one of the ground truth answers.
    There can be multiple ground truth answers and multiple predicted answers as input.


    Usage example:
    ```python
    from haystack.components.evaluators import AnswerExactMatchEvaluator

    evaluator = AnswerExactMatchEvaluator()
    result = evaluator.run(
        ground_truth_answers=["Berlin", "Paris"],
        predicted_answers=["Berlin", "Lyon"],
    )

    print(result["individual_scores"])
    # [1, 0]
    print(result["score"])
    # 0.5
    ```
    """

    @component.output_types(individual_scores=List[int], score=float)
    def run(self, ground_truth_answers: List[str], predicted_answers: List[str]) -> Dict[str, Any]:
        """
        Run the AnswerExactMatchEvaluator on the given inputs.

        The `ground_truth_answers` and `predicted_answers` must have the same length.

        :param ground_truth_answers:
            A list of expected answers.
        :param predicted_answers:
            A list of predicted answers.
        :returns:
            A dictionary with the following outputs:
            - `individual_scores` - A list of 0s and 1s, where 1 means that the predicted answer matched one of the
            ground truth.
            - `score` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
            answer matched one of the ground truth answers.
        :raises ValueError:
            If the two input lists do not have the same length.
        """
        if len(ground_truth_answers) != len(predicted_answers):
            raise ValueError("The length of ground_truth_answers and predicted_answers must be the same.")

        # 1 where the prediction is exactly equal to its paired ground truth answer, 0 otherwise.
        matches = [int(truth == predicted) for truth, predicted in zip(ground_truth_answers, predicted_answers)]

        # The proportion of questions where any predicted answer matched one of the ground truth answers.
        # Empty inputs yield a score of 0.0 instead of raising ZeroDivisionError.
        average = sum(matches) / len(matches) if matches else 0.0

        return {"individual_scores": matches, "score": average}
testbed/deepset-ai__haystack/haystack/components/evaluators/context_relevance.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from statistics import mean
|
| 6 |
+
from typing import Any, Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
from haystack import component, default_from_dict, default_to_dict
|
| 9 |
+
from haystack.components.evaluators.llm_evaluator import LLMEvaluator
|
| 10 |
+
from haystack.utils import Secret, deserialize_secrets_inplace
|
| 11 |
+
|
| 12 |
+
# Private global variable for default examples to include in the prompt if the user does not provide any examples
# Each example maps evaluator inputs (one question plus a list of context strings) to the expected
# output: the statements from those contexts that are relevant to answering the question.
_DEFAULT_EXAMPLES = [
    {
        "inputs": {
            "questions": "What is the capital of Germany?",
            "contexts": ["Berlin is the capital of Germany. Berlin and was founded in 1244."],
        },
        "outputs": {"relevant_statements": ["Berlin is the capital of Germany."]},
    },
    {
        "inputs": {
            "questions": "What is the capital of France?",
            "contexts": [
                "Berlin is the capital of Germany and was founded in 1244.",
                "Europe is a continent with 44 countries.",
                "Madrid is the capital of Spain.",
            ],
        },
        # No context mentions France, so no statement is relevant.
        "outputs": {"relevant_statements": []},
    },
    {
        "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
        "outputs": {"relevant_statements": ["Rome is the capital of Italy."]},
    },
]
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@component
class ContextRelevanceEvaluator(LLMEvaluator):
    """
    Evaluator that checks if a provided context is relevant to the question.

    An LLM splits each context into statements and keeps only those that are relevant for
    answering the question. Every question/context pair receives a binary score: 1 when at
    least one relevant statement was extracted, 0 otherwise. The component also reports the
    mean score over all pairs and the extracted relevant statements themselves.

    Usage example:
    ```python
    from haystack.components.evaluators import ContextRelevanceEvaluator

    questions = ["Who created the Python language?"]
    contexts = [["Python, created by Guido van Rossum in the late 1980s, is a programming language."]]

    evaluator = ContextRelevanceEvaluator()
    result = evaluator.run(questions=questions, contexts=contexts)
    print(result["score"])  # mean binary score over all pairs
    print(result["individual_scores"])  # one binary score per pair
    print(result["results"])  # per pair: {'relevant_statements': [...], 'score': ...}
    ```
    """

    def __init__(
        self,
        examples: Optional[List[Dict[str, Any]]] = None,
        progress_bar: bool = True,
        api: str = "openai",
        api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
        api_params: Optional[Dict[str, Any]] = None,
        raise_on_failure: bool = True,
    ):
        """
        Creates an instance of ContextRelevanceEvaluator.

        :param examples:
            Optional few-shot examples conforming to the expected input and output format of
            ContextRelevanceEvaluator. Default examples are used if none are provided.
            Each example must be a dictionary with keys "inputs" and "outputs":
            "inputs" holds "questions" and "contexts"; "outputs" holds "relevant_statements".
            Expected format:
            [{
                "inputs": {
                    "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
                },
                "outputs": {
                    "relevant_statements": ["Rome is the capital of Italy."],
                },
            }]
        :param progress_bar:
            Whether to show a progress bar during the evaluation.
        :param api:
            The API to use for calling an LLM through a Generator. Supported APIs: "openai".
        :param api_key:
            The API key.
        :param api_params:
            Parameters for an OpenAI API compatible completions call.
        :param raise_on_failure:
            Whether to raise an exception if the API call fails.
        """
        # Configuration is stored on the instance first and then forwarded to the base class,
        # which is the shape LLMEvaluator expects.
        self.instructions = (
            "Please extract only sentences from the provided context which are absolutely relevant and "
            "required to answer the following question. If no relevant sentences are found, or if you "
            "believe the question cannot be answered from the given context, return an empty list, example: []"
        )
        self.inputs = [("questions", List[str]), ("contexts", List[List[str]])]
        self.outputs = ["relevant_statements"]
        self.examples = examples or _DEFAULT_EXAMPLES
        self.api = api
        self.api_key = api_key
        self.api_params = api_params or {}

        super().__init__(
            instructions=self.instructions,
            inputs=self.inputs,
            outputs=self.outputs,
            examples=self.examples,
            api=self.api,
            api_key=self.api_key,
            api_params=self.api_params,
            raise_on_failure=raise_on_failure,
            progress_bar=progress_bar,
        )

    @component.output_types(score=float, results=List[Dict[str, Any]])
    def run(self, **inputs) -> Dict[str, Any]:
        """
        Run the LLM evaluator.

        :param questions:
            A list of questions.
        :param contexts:
            A list of lists of contexts. Each list of contexts corresponds to one question.
        :returns:
            A dictionary with the following outputs:
            - `score`: Mean context relevance score over all the provided input questions.
            - `results`: A list of dictionaries with `relevant_statements` and `score` for each input context.
        """
        result = super().run(**inputs)

        for idx, entry in enumerate(result["results"]):
            if entry is None:
                # The LLM call failed for this pair; record an empty, NaN-scored placeholder.
                result["results"][idx] = {"relevant_statements": [], "score": float("nan")}
            else:
                # Binary relevance: 1 if any relevant statement was extracted, 0 otherwise.
                entry["score"] = 1 if entry["relevant_statements"] else 0

        per_pair_scores = [entry["score"] for entry in result["results"]]
        # Average context relevance score over all queries.
        result["score"] = mean(per_pair_scores)
        result["individual_scores"] = list(per_pair_scores)  # useful for the EvaluationRunResult

        return result

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            A dictionary with serialized data.
        """
        return default_to_dict(
            self,
            api=self.api,
            api_key=self.api_key.to_dict() if self.api_key else None,
            examples=self.examples,
            progress_bar=self.progress_bar,
            api_params=self.api_params,
            raise_on_failure=self.raise_on_failure,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ContextRelevanceEvaluator":
        """
        Deserialize this component from a dictionary.

        :param data:
            The dictionary representation of this component.
        :returns:
            The deserialized component instance.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        return default_from_dict(cls, data)
|
testbed/deepset-ai__haystack/haystack/components/evaluators/document_map.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List
|
| 6 |
+
|
| 7 |
+
from haystack import Document, component
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@component
class DocumentMAPEvaluator:
    """
    A Mean Average Precision (MAP) evaluator for documents.

    Calculates the mean average precision of the retrieved documents, a metric that measures
    how high relevant documents are ranked. Each question can have multiple ground truth
    documents and multiple retrieved documents.

    `DocumentMAPEvaluator` doesn't normalize its inputs; the `DocumentCleaner` component
    should be used to clean and normalize the documents before passing them to this evaluator.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.evaluators import DocumentMAPEvaluator

    evaluator = DocumentMAPEvaluator()
    result = evaluator.run(
        ground_truth_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="9th")],
        ],
        retrieved_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
        ],
    )

    print(result["individual_scores"])
    # [1.0, 0.8333333333333333]
    print(result["score"])
    # 0.9166666666666666
    ```
    """

    # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
    @component.output_types(score=float, individual_scores=List[float])
    def run(
        self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
    ) -> Dict[str, Any]:
        """
        Run the DocumentMAPEvaluator on the given inputs.

        All lists must have the same length.

        :param ground_truth_documents:
            A list of expected documents for each question.
        :param retrieved_documents:
            A list of retrieved documents for each question.
        :returns:
            A dictionary with the following outputs:
            - `score` - The average of calculated scores.
            - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high
              retrieved documents are ranked.
        """
        if len(ground_truth_documents) != len(retrieved_documents):
            msg = "The length of ground_truth_documents and retrieved_documents must be the same."
            raise ValueError(msg)

        individual_scores = []
        for expected, retrieved in zip(ground_truth_documents, retrieved_documents):
            # Matching is done on document content; content-less ground truths are ignored.
            expected_contents = {doc.content for doc in expected if doc.content is not None}

            hits = 0
            precision_sum = 0.0
            # `position` is a 1-based rank and counts every retrieved slot, including
            # documents without content (they are only skipped for matching).
            for position, candidate in enumerate(retrieved, start=1):
                if candidate.content is None:
                    continue
                if candidate.content in expected_contents:
                    hits += 1
                    precision_sum += hits / position

            individual_scores.append(precision_sum / hits if hits else 0.0)

        score = sum(individual_scores) / len(ground_truth_documents)
        return {"score": score, "individual_scores": individual_scores}
|
testbed/deepset-ai__haystack/haystack/components/evaluators/document_mrr.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List
|
| 6 |
+
|
| 7 |
+
from haystack import Document, component
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@component
class DocumentMRREvaluator:
    """
    Evaluator that calculates the mean reciprocal rank of the retrieved documents.

    MRR measures how high the first relevant retrieved document is ranked.
    Each question can have multiple ground truth documents and multiple retrieved documents.

    `DocumentMRREvaluator` doesn't normalize its inputs; the `DocumentCleaner` component
    should be used to clean and normalize the documents before passing them to this evaluator.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.evaluators import DocumentMRREvaluator

    evaluator = DocumentMRREvaluator()
    result = evaluator.run(
        ground_truth_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="9th")],
        ],
        retrieved_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
        ],
    )
    print(result["individual_scores"])
    # [1.0, 1.0]
    print(result["score"])
    # 1.0
    ```
    """

    # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
    @component.output_types(score=float, individual_scores=List[float])
    def run(
        self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
    ) -> Dict[str, Any]:
        """
        Run the DocumentMRREvaluator on the given inputs.

        `ground_truth_documents` and `retrieved_documents` must have the same length.

        :param ground_truth_documents:
            A list of expected documents for each question.
        :param retrieved_documents:
            A list of retrieved documents for each question.
        :returns:
            A dictionary with the following outputs:
            - `score` - The average of calculated scores.
            - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high
              the first retrieved document is ranked.
        """
        if len(ground_truth_documents) != len(retrieved_documents):
            msg = "The length of ground_truth_documents and retrieved_documents must be the same."
            raise ValueError(msg)

        individual_scores = []
        for expected, retrieved in zip(ground_truth_documents, retrieved_documents):
            # Matching is done on document content; content-less ground truths are ignored.
            expected_contents = {doc.content for doc in expected if doc.content is not None}

            reciprocal_rank = 0.0
            # `position` is a 1-based rank; the first match determines the reciprocal rank.
            for position, candidate in enumerate(retrieved, start=1):
                if candidate.content is not None and candidate.content in expected_contents:
                    reciprocal_rank = 1 / position
                    break
            individual_scores.append(reciprocal_rank)

        score = sum(individual_scores) / len(ground_truth_documents)

        return {"score": score, "individual_scores": individual_scores}
|
testbed/deepset-ai__haystack/haystack/components/evaluators/document_ndcg.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from math import log2
|
| 6 |
+
from typing import Any, Dict, List
|
| 7 |
+
|
| 8 |
+
from haystack import Document, component
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@component
class DocumentNDCGEvaluator:
    """
    Evaluator that calculates the normalized discounted cumulative gain (NDCG) of retrieved documents.

    Each question can have multiple ground truth documents and multiple retrieved documents.
    If the ground truth documents carry relevance scores, the NDCG calculation uses those
    scores; otherwise all ground truth documents are assumed to have binary relevance (gain 1).

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.evaluators import DocumentNDCGEvaluator

    evaluator = DocumentNDCGEvaluator()
    result = evaluator.run(
        ground_truth_documents=[[Document(content="France", score=1.0), Document(content="Paris", score=0.5)]],
        retrieved_documents=[[Document(content="France"), Document(content="Germany"), Document(content="Paris")]],
    )
    print(result["individual_scores"])
    # [0.8869]
    print(result["score"])
    # 0.8869
    ```
    """

    @component.output_types(score=float, individual_scores=List[float])
    def run(
        self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
    ) -> Dict[str, Any]:
        """
        Run the DocumentNDCGEvaluator on the given inputs.

        `ground_truth_documents` and `retrieved_documents` must have the same length.
        The list items within `ground_truth_documents` and `retrieved_documents` can differ in length.

        :param ground_truth_documents:
            Lists of expected documents, one list per question. Binary relevance is used if documents have no scores.
        :param retrieved_documents:
            Lists of retrieved documents, one list per question.
        :returns:
            A dictionary with the following outputs:
            - `score` - The average of calculated scores.
            - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents the NDCG for each question.
        """
        self.validate_inputs(ground_truth_documents, retrieved_documents)

        individual_scores = []

        for gt_docs, ret_docs in zip(ground_truth_documents, retrieved_documents):
            idcg = self.calculate_idcg(gt_docs)
            dcg = self.calculate_dcg(gt_docs, ret_docs)
            # Guard against an IDCG of 0 (e.g. all-zero relevance scores).
            individual_scores.append(dcg / idcg if idcg > 0 else 0)

        score = sum(individual_scores) / len(ground_truth_documents)

        return {"score": score, "individual_scores": individual_scores}

    @staticmethod
    def validate_inputs(gt_docs: List[List[Document]], ret_docs: List[List[Document]]):
        """
        Validate the input parameters.

        :param gt_docs:
            The ground_truth_documents to validate.
        :param ret_docs:
            The retrieved_documents to validate.

        :raises ValueError:
            If the ground_truth_documents or the retrieved_documents are an empty a list.
            If the length of ground_truth_documents and retrieved_documents differs.
            If any list of documents in ground_truth_documents contains a mix of documents with and without a score.
        """
        if not gt_docs or not ret_docs:
            msg = "ground_truth_documents and retrieved_documents must be provided."
            raise ValueError(msg)

        if len(gt_docs) != len(ret_docs):
            msg = "The length of ground_truth_documents and retrieved_documents must be the same."
            raise ValueError(msg)

        for docs in gt_docs:
            has_score = [doc.score is not None for doc in docs]
            # Scored and unscored documents must not be mixed within one question's ground truth.
            if any(has_score) and not all(has_score):
                msg = "Either none or all documents in each list of ground_truth_documents must have a score."
                raise ValueError(msg)

    @staticmethod
    def calculate_dcg(gt_docs: List[Document], ret_docs: List[Document]) -> float:
        """
        Calculate the discounted cumulative gain (DCG) of the retrieved documents.

        :param gt_docs:
            The ground truth documents.
        :param ret_docs:
            The retrieved documents.
        :returns:
            The discounted cumulative gain (DCG) of the retrieved documents based on the
            ground truth documents.
        """
        # Gain per relevant document id; unscored documents count as binary relevance 1.
        relevance_by_id = {doc.id: 1 if doc.score is None else doc.score for doc in gt_docs}
        dcg = 0.0
        # `position` starts at 2 so the top-ranked document is discounted by log2(2) == 1.
        for position, doc in enumerate(ret_docs, start=2):
            if doc.id in relevance_by_id:  # TODO Related to https://github.com/deepset-ai/haystack/issues/8412
                dcg += relevance_by_id[doc.id] / log2(position)
        return dcg

    @staticmethod
    def calculate_idcg(gt_docs: List[Document]) -> float:
        """
        Calculate the ideal discounted cumulative gain (IDCG) of the ground truth documents.

        :param gt_docs:
            The ground truth documents.
        :returns:
            The ideal discounted cumulative gain (IDCG) of the ground truth documents.
        """
        # Ideal ranking: gains sorted from highest to lowest; unscored documents count as 1.
        gains = sorted((1 if doc.score is None else doc.score for doc in gt_docs), reverse=True)
        return sum(gain / log2(position) for position, gain in enumerate(gains, start=2))
|
testbed/deepset-ai__haystack/haystack/components/evaluators/document_recall.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from enum import Enum
|
| 6 |
+
from typing import Any, Dict, List, Union
|
| 7 |
+
|
| 8 |
+
from haystack import component, default_to_dict
|
| 9 |
+
from haystack.dataclasses import Document
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class RecallMode(Enum):
    """
    Enum for the mode to use for calculating the recall score.
    """

    # Score is based on whether any document is retrieved.
    SINGLE_HIT = "single_hit"
    # Score is based on how many documents were retrieved.
    MULTI_HIT = "multi_hit"

    def __str__(self):
        return self.value

    @staticmethod
    def from_str(string: str) -> "RecallMode":
        """
        Convert a string to a RecallMode enum.
        """
        try:
            # Enum value lookup raises ValueError for unknown values.
            return RecallMode(string)
        except ValueError:
            supported = [mode.value for mode in RecallMode]
            msg = f"Unknown recall mode '{string}'. Supported modes are: {supported}"
            raise ValueError(msg) from None
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@component
class DocumentRecallEvaluator:
    """
    Evaluator that calculates the Recall score for a list of documents.

    Returns both a list of scores for each question and the average.
    There can be multiple ground truth documents and multiple predicted documents as input.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.evaluators import DocumentRecallEvaluator

    evaluator = DocumentRecallEvaluator()
    result = evaluator.run(
        ground_truth_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="9th")],
        ],
        retrieved_documents=[
            [Document(content="France")],
            [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
        ],
    )
    print(result["individual_scores"])
    # [1.0, 1.0]
    print(result["score"])
    # 1.0
    ```
    """

    def __init__(self, mode: Union[str, RecallMode] = RecallMode.SINGLE_HIT):
        """
        Create a DocumentRecallEvaluator component.

        :param mode:
            Mode to use for calculating the recall score. Either a `RecallMode` member or its
            string value (`"single_hit"` or `"multi_hit"`).
        """
        if isinstance(mode, str):
            mode = RecallMode.from_str(mode)

        # Resolve the scoring function once at construction time so run() needs no branching.
        mode_functions = {RecallMode.SINGLE_HIT: self._recall_single_hit, RecallMode.MULTI_HIT: self._recall_multi_hit}
        self.mode_function = mode_functions[mode]
        self.mode = mode

    def _recall_single_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
        """Return 1.0 if at least one ground truth document was retrieved, 0.0 otherwise."""
        unique_truths = {g.content for g in ground_truth_documents}
        unique_retrievals = {p.content for p in retrieved_documents}
        retrieved_ground_truths = unique_truths.intersection(unique_retrievals)

        return float(len(retrieved_ground_truths) > 0)

    def _recall_multi_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
        """Return the fraction of ground truth documents (by content) that were retrieved."""
        unique_truths = {g.content for g in ground_truth_documents}
        unique_retrievals = {p.content for p in retrieved_documents}
        retrieved_ground_truths = unique_truths.intersection(unique_retrievals)

        return len(retrieved_ground_truths) / len(ground_truth_documents)

    @component.output_types(score=float, individual_scores=List[float])
    def run(
        self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
    ) -> Dict[str, Any]:
        """
        Run the DocumentRecallEvaluator on the given inputs.

        `ground_truth_documents` and `retrieved_documents` must have the same length.

        :param ground_truth_documents:
            A list of expected documents for each question.
        :param retrieved_documents:
            A list of retrieved documents for each question.
        :returns:
            A dictionary with the following outputs:
            - `score` - The average of calculated scores.
            - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents the proportion of matching
              documents retrieved. If the mode is `single_hit`, the individual scores are 0 or 1.
        :raises ValueError:
            If the lengths of ground_truth_documents and retrieved_documents differ.
        """
        if len(ground_truth_documents) != len(retrieved_documents):
            msg = "The length of ground_truth_documents and retrieved_documents must be the same."
            raise ValueError(msg)

        scores = []
        for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
            score = self.mode_function(ground_truth, retrieved)
            scores.append(score)

        # Both input lists are validated above to have the same length, so averaging over
        # ground_truth_documents matches the convention of the other document evaluators.
        return {"score": sum(scores) / len(ground_truth_documents), "individual_scores": scores}

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(self, mode=str(self.mode))
|
testbed/deepset-ai__haystack/haystack/components/evaluators/faithfulness.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from numpy import mean as np_mean
|
| 8 |
+
|
| 9 |
+
from haystack import component, default_from_dict, default_to_dict
|
| 10 |
+
from haystack.components.evaluators.llm_evaluator import LLMEvaluator
|
| 11 |
+
from haystack.utils import Secret, deserialize_secrets_inplace
|
| 12 |
+
|
| 13 |
+
# Default few-shot examples injected into the LLM prompt when the user does not
# provide any of their own. Each example pairs an "inputs" dict (keys matching the
# evaluator's input sockets: questions/contexts/predicted_answers) with an
# "outputs" dict (keys matching the expected output keys: statements and
# per-statement 0/1 faithfulness scores).
_DEFAULT_EXAMPLES = [
    {
        "inputs": {
            "questions": "What is the capital of Germany and when was it founded?",
            "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
            "predicted_answers": "The capital of Germany, Berlin, was founded in the 13th century.",
        },
        "outputs": {
            "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
            "statement_scores": [1, 1],
        },
    },
    {
        "inputs": {
            "questions": "What is the capital of France?",
            "contexts": ["Berlin is the capital of Germany."],
            "predicted_answers": "Paris",
        },
        "outputs": {"statements": ["Paris is the capital of France."], "statement_scores": [0]},
    },
    {
        "inputs": {
            "questions": "What is the capital of Italy?",
            "contexts": ["Rome is the capital of Italy."],
            "predicted_answers": "Rome is the capital of Italy with more than 4 million inhabitants.",
        },
        "outputs": {
            "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
            "statement_scores": [1, 0],
        },
    },
]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@component
class FaithfulnessEvaluator(LLMEvaluator):
    """
    Evaluator that checks if a generated answer can be inferred from the provided contexts.

    An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
    context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
    statements that can be inferred from the provided contexts.

    Usage example:
    ```python
    from haystack.components.evaluators import FaithfulnessEvaluator

    questions = ["Who created the Python language?"]
    contexts = [
        [(
            "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming "
            "language. Its design philosophy emphasizes code readability, and its language constructs aim to help "
            "programmers write clear, logical code for both small and large-scale software projects."
        )],
    ]
    predicted_answers = [
        "Python is a high-level general-purpose programming language that was created by George Lucas."
    ]
    evaluator = FaithfulnessEvaluator()
    result = evaluator.run(questions=questions, contexts=contexts, predicted_answers=predicted_answers)

    print(result["individual_scores"])
    # [0.5]
    print(result["score"])
    # 0.5
    print(result["results"])
    # [{'statements': ['Python is a high-level general-purpose programming language.',
    'Python was created by George Lucas.'], 'statement_scores': [1, 0], 'score': 0.5}]
    ```
    """

    def __init__(
        self,
        examples: Optional[List[Dict[str, Any]]] = None,
        progress_bar: bool = True,
        api: str = "openai",
        api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
        api_params: Optional[Dict[str, Any]] = None,
        raise_on_failure: bool = True,
    ):
        """
        Creates an instance of FaithfulnessEvaluator.

        :param examples:
            Optional few-shot examples conforming to the expected input and output format of FaithfulnessEvaluator.
            Default examples will be used if none are provided.
            Each example must be a dictionary with keys "inputs" and "outputs".
            "inputs" must be a dictionary with keys "questions", "contexts", and "predicted_answers".
            "outputs" must be a dictionary with "statements" and "statement_scores".
            Expected format:
            [{
                "inputs": {
                    "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
                    "predicted_answers": "Rome is the capital of Italy with more than 4 million inhabitants.",
                },
                "outputs": {
                    "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
                    "statement_scores": [1, 0],
                },
            }]
        :param progress_bar:
            Whether to show a progress bar during the evaluation.
        :param api:
            The API to use for calling an LLM through a Generator.
            Supported APIs: "openai".
        :param api_key:
            The API key.
        :param api_params:
            Parameters for an OpenAI API compatible completions call.
        :param raise_on_failure:
            Whether to raise an exception if the API call fails.

        """
        # The instructions, inputs, and outputs are fixed by this metric; only the
        # examples and API configuration are user-configurable.
        self.instructions = (
            "Your task is to judge the faithfulness or groundedness of statements based "
            "on context information. First, please extract statements from a provided "
            "predicted answer to a question. Second, calculate a faithfulness score for each "
            "statement made in the predicted answer. The score is 1 if the statement can be "
            "inferred from the provided context or 0 if it cannot be inferred."
        )
        self.inputs = [("questions", List[str]), ("contexts", List[List[str]]), ("predicted_answers", List[str])]
        self.outputs = ["statements", "statement_scores"]
        self.examples = examples or _DEFAULT_EXAMPLES
        self.api = api
        self.api_key = api_key
        self.api_params = api_params or {}

        super(FaithfulnessEvaluator, self).__init__(
            instructions=self.instructions,
            inputs=self.inputs,
            outputs=self.outputs,
            examples=self.examples,
            api=self.api,
            api_key=self.api_key,
            api_params=self.api_params,
            raise_on_failure=raise_on_failure,
            progress_bar=progress_bar,
        )

    # NOTE(review): individual scores are fractional means, so List[int] here looks
    # like it should be List[float] — kept as-is; confirm against consumers.
    @component.output_types(individual_scores=List[int], score=float, results=List[Dict[str, Any]])
    def run(self, **inputs) -> Dict[str, Any]:
        """
        Run the LLM evaluator.

        :param questions:
            A list of questions.
        :param contexts:
            A nested list of contexts that correspond to the questions.
        :param predicted_answers:
            A list of predicted answers.
        :returns:
            A dictionary with the following outputs:
                - `score`: Mean faithfulness score over all the provided input answers.
                - `individual_scores`: A list of faithfulness scores for each input answer.
                - `results`: A list of dictionaries with `statements` and `statement_scores` for each input answer.
        """
        # Delegate the per-answer LLM calls to LLMEvaluator.run.
        result = super(FaithfulnessEvaluator, self).run(**inputs)

        # calculate average statement faithfulness score per query
        for idx, res in enumerate(result["results"]):
            if res is None:
                # A failed or unparsable LLM response: mark the entry with NaN so the
                # failure stays visible instead of silently counting as 0.
                result["results"][idx] = {"statements": [], "statement_scores": [], "score": float("nan")}
                continue
            if not res["statements"]:
                # No extracted statements means nothing is grounded; score 0.
                res["score"] = 0
            else:
                res["score"] = np_mean(res["statement_scores"])

        # calculate average answer faithfulness score over all queries
        # (np_mean propagates NaN from failed entries into the aggregate score)
        result["score"] = np_mean([res["score"] for res in result["results"]])
        result["individual_scores"] = [res["score"] for res in result["results"]]

        return result

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        Only the user-configurable parameters are serialized; instructions, inputs,
        and outputs are fixed by this subclass and recreated in __init__.

        :returns:
            A dictionary with serialized data.
        """
        return default_to_dict(
            self,
            api=self.api,
            api_key=self.api_key.to_dict() if self.api_key else None,
            api_params=self.api_params,
            examples=self.examples,
            progress_bar=self.progress_bar,
            raise_on_failure=self.raise_on_failure,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "FaithfulnessEvaluator":
        """
        Deserialize this component from a dictionary.

        :param data:
            The dictionary representation of this component.
        :returns:
            The deserialized component instance.
        """
        # Rehydrate the Secret for api_key in place before instantiating.
        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        return default_from_dict(cls, data)
|
testbed/deepset-ai__haystack/haystack/components/evaluators/llm_evaluator.py
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from typing import Any, Dict, List, Optional, Tuple, Type
|
| 7 |
+
from warnings import warn
|
| 8 |
+
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from haystack import component, default_from_dict, default_to_dict
|
| 12 |
+
from haystack.components.builders import PromptBuilder
|
| 13 |
+
from haystack.components.generators import OpenAIGenerator
|
| 14 |
+
from haystack.utils import Secret, deserialize_secrets_inplace, deserialize_type, serialize_type
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@component
class LLMEvaluator:
    """
    Uses an LLM to evaluate inputs based on a prompt containing instructions and examples.

    The default API requires an OpenAI API key to be provided as an environment variable "OPENAI_API_KEY".
    The inputs are lists that are user-defined depending on the desired metric.
    The output is a dictionary with a key `results` containing a list of evaluation results.
    Each result is a dictionary with user-defined keys and values of either 0 for FALSE or 1 for TRUE respectively.

    Usage example:
    ```python
    from typing import List
    from haystack.components.evaluators import LLMEvaluator
    evaluator = LLMEvaluator(
        instructions="Is this answer problematic for children?",
        inputs=[("predicted_answers", List[str])],
        outputs=["score"],
        examples=[
            {"inputs": {"predicted_answers": "Damn, this is straight outta hell!!!"}, "outputs": {"score": 1}},
            {"inputs": {"predicted_answers": "Football is the most popular sport."}, "outputs": {"score": 0}},
        ],
    )
    predicted_answers = [
        "Football is the most popular sport with around 4 billion followers worldwide",
        "Python language was created by Guido van Rossum.",
    ]
    results = evaluator.run(predicted_answers=predicted_answers)
    print(results)
    # {'results': [{'score': 0}, {'score': 0}]}
    ```
    """

    def __init__(
        self,
        instructions: str,
        inputs: List[Tuple[str, Type[List]]],
        outputs: List[str],
        examples: List[Dict[str, Any]],
        progress_bar: bool = True,
        *,
        raise_on_failure: bool = True,
        api: str = "openai",
        api_key: Optional[Secret] = None,
        api_params: Optional[Dict[str, Any]] = None,
    ):
        """
        Creates an instance of LLMEvaluator.

        :param instructions:
            The prompt instructions to use for evaluation.
            Should be a question about the inputs that can be answered with yes or no.
        :param inputs:
            The inputs that the component expects as incoming connections and that it evaluates.
            Each input is a tuple of an input name and input type. Input types must be lists.
        :param outputs:
            Output names of the evaluation results. They correspond to keys in the output dictionary.
        :param examples:
            Few-shot examples conforming to the expected input and output format as defined in the `inputs` and
            `outputs` parameters.
            Each example is a dictionary with keys "inputs" and "outputs"
            They contain the input and output as dictionaries respectively.
        :param raise_on_failure:
            If True, the component will raise an exception on an unsuccessful API call.
        :param progress_bar:
            Whether to show a progress bar during the evaluation.
        :param api:
            The API to use for calling an LLM through a Generator.
            Supported APIs: "openai".
        :param api_key:
            The API key to be passed to a LLM provider. It may not be necessary when using a locally hosted model.
        :param api_params:
            Parameters for an OpenAI API compatible completions call.

        :raises ValueError:
            If `inputs`, `outputs`, or `examples` are malformed, or if `api` is unsupported.
        """
        self.validate_init_parameters(inputs, outputs, examples)
        self.raise_on_failure = raise_on_failure
        self.instructions = instructions
        self.inputs = inputs
        self.outputs = outputs
        self.examples = examples
        self.api = api
        self.api_key = api_key
        # Copy the caller's dict: the generation_kwargs merge below would otherwise
        # mutate the dictionary the caller passed in.
        self.api_params = dict(api_params) if api_params else {}
        self.progress_bar = progress_bar

        # Default to a deterministic, JSON-only response; user-supplied
        # generation_kwargs take precedence over these defaults.
        default_generation_kwargs = {"response_format": {"type": "json_object"}, "seed": 42}
        user_generation_kwargs = self.api_params.get("generation_kwargs", {})
        merged_generation_kwargs = {**default_generation_kwargs, **user_generation_kwargs}
        self.api_params["generation_kwargs"] = merged_generation_kwargs

        if api == "openai":
            generator_kwargs = {**self.api_params}
            if api_key:
                generator_kwargs["api_key"] = api_key
            self.generator = OpenAIGenerator(**generator_kwargs)
        else:
            raise ValueError(f"Unsupported API: {api}")

        template = self.prepare_template()
        self.builder = PromptBuilder(template=template)

        # Register one pipeline input socket per user-defined input name/type.
        component.set_input_types(self, **dict(inputs))

    @staticmethod
    def validate_init_parameters(
        inputs: List[Tuple[str, Type[List]]], outputs: List[str], examples: List[Dict[str, Any]]
    ):
        """
        Validate the init parameters.

        :param inputs:
            The inputs to validate.
        :param outputs:
            The outputs to validate.
        :param examples:
            The examples to validate.

        :raises ValueError:
            If the inputs are not a list of tuples with a string and a type of list.
            If the outputs are not a list of strings.
            If the examples are not a list of dictionaries.
            If any example does not have keys "inputs" and "outputs" with values that are dictionaries with string keys.
        """
        # Validate inputs.
        # Check the tuple length *before* indexing into it, so that a malformed
        # tuple (e.g. empty or single-element) raises the documented ValueError
        # instead of an IndexError.
        if (
            not isinstance(inputs, list)
            or not all(isinstance(_input, tuple) for _input in inputs)
            or not all(len(_input) == 2 and isinstance(_input[0], str) and _input[1] is not list for _input in inputs)
        ):
            msg = (
                f"LLM evaluator expects inputs to be a list of tuples. Each tuple must contain an input name and "
                f"type of list but received {inputs}."
            )
            raise ValueError(msg)

        # Validate outputs
        if not isinstance(outputs, list) or not all(isinstance(output, str) for output in outputs):
            msg = f"LLM evaluator expects outputs to be a list of str but received {outputs}."
            raise ValueError(msg)

        # Validate examples are lists of dicts
        if not isinstance(examples, list) or not all(isinstance(example, dict) for example in examples):
            msg = f"LLM evaluator expects examples to be a list of dictionaries but received {examples}."
            raise ValueError(msg)

        # Validate each example: exactly the keys "inputs" and "outputs", both
        # mapping to dictionaries whose keys are strings.
        for example in examples:
            if (
                {"inputs", "outputs"} != example.keys()
                or not all(isinstance(example[param], dict) for param in ["inputs", "outputs"])
                or not all(isinstance(key, str) for param in ["inputs", "outputs"] for key in example[param])
            ):
                msg = (
                    f"LLM evaluator expects each example to have keys `inputs` and `outputs` with values that are "
                    f"dictionaries with str keys but received {example}."
                )
                raise ValueError(msg)

    @component.output_types(results=List[Dict[str, Any]])
    def run(self, **inputs) -> Dict[str, Any]:
        """
        Run the LLM evaluator.

        :param inputs:
            The input values to evaluate. The keys are the input names and the values are lists of input values.
        :returns:
            A dictionary with a `results` entry that contains a list of results.
            Each result is a dictionary containing the keys as defined in the `outputs` parameter of the LLMEvaluator
            and the evaluation results as the values. If an exception occurs for a particular input value, the result
            will be `None` for that entry.
            If the API is "openai" and the response contains a "meta" key, the metadata from OpenAI will be included
            in the output dictionary, under the key "meta".
        :raises ValueError:
            Only in the case that `raise_on_failure` is set to True and the received inputs are not lists or have
            different lengths, or if the output is not a valid JSON or doesn't contain the expected keys.
        """
        self.validate_input_parameters(dict(self.inputs), inputs)

        # inputs is a dictionary with keys being input names and values being a list of input values
        # We need to iterate through the lists in parallel for all keys of the dictionary
        input_names, values = inputs.keys(), list(zip(*inputs.values()))
        list_of_input_names_to_values = [dict(zip(input_names, v)) for v in values]

        results: List[Optional[Dict[str, Any]]] = []
        metadata = None
        errors = 0
        for input_names_to_values in tqdm(list_of_input_names_to_values, disable=not self.progress_bar):
            prompt = self.builder.run(**input_names_to_values)
            try:
                result = self.generator.run(prompt=prompt["prompt"])
            except Exception as e:
                msg = f"Error while generating response for prompt: {prompt}. Error: {e}"
                if self.raise_on_failure:
                    raise ValueError(msg)
                # Best-effort mode: record the failure as None and keep going.
                warn(msg)
                results.append(None)
                errors += 1
                continue

            if self.is_valid_json_and_has_expected_keys(expected=self.outputs, received=result["replies"][0]):
                parsed_result = json.loads(result["replies"][0])
                results.append(parsed_result)
            else:
                results.append(None)
                errors += 1

            # Keep the metadata of the most recent successful generator call.
            if self.api == "openai" and "meta" in result:
                metadata = result["meta"]

        if errors > 0:
            msg = f"LLM evaluator failed for {errors} out of {len(list_of_input_names_to_values)} inputs."
            warn(msg)

        return {"results": results, "meta": metadata}

    def prepare_template(self) -> str:
        """
        Prepare the prompt template.

        Combine instructions, inputs, outputs, and examples into one prompt template with the following format:
        Instructions:
        <instructions>

        Generate the response in JSON format with the following keys:
        <list of output keys>
        Consider the instructions and the examples below to determine those values.

        Examples:
        <examples>

        Inputs:
        <inputs>
        Outputs:

        :returns:
            The prompt template.
        """
        # Render each input as a Jinja placeholder so PromptBuilder can fill it in,
        # e.g. {"questions": {{ questions }}}.
        inputs_section = (
            "{" + ", ".join([f'"{input_socket[0]}": {{{{ {input_socket[0]} }}}}' for input_socket in self.inputs]) + "}"
        )

        examples_section = "\n".join(
            [
                "Inputs:\n" + json.dumps(example["inputs"]) + "\nOutputs:\n" + json.dumps(example["outputs"])
                for example in self.examples
            ]
        )
        return (
            f"Instructions:\n"
            f"{self.instructions}\n\n"
            f"Generate the response in JSON format with the following keys:\n"
            f"{json.dumps(self.outputs)}\n"
            f"Consider the instructions and the examples below to determine those values.\n\n"
            f"Examples:\n"
            f"{examples_section}\n\n"
            f"Inputs:\n"
            f"{inputs_section}\n"
            f"Outputs:\n"
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            The serialized component as a dictionary.
        """
        # Since we cannot currently serialize tuples, convert the inputs to a list.
        inputs = [[name, serialize_type(type_)] for name, type_ in self.inputs]
        return default_to_dict(
            self,
            instructions=self.instructions,
            inputs=inputs,
            outputs=self.outputs,
            examples=self.examples,
            api=self.api,
            api_key=self.api_key and self.api_key.to_dict(),
            api_params=self.api_params,
            progress_bar=self.progress_bar,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "LLMEvaluator":
        """
        Deserialize this component from a dictionary.

        :param data:
            The dictionary representation of this component.
        :returns:
            The deserialized component instance.
        """
        # Restore the (name, type) tuples that to_dict flattened into lists.
        data["init_parameters"]["inputs"] = [
            (name, deserialize_type(type_)) for name, type_ in data["init_parameters"]["inputs"]
        ]

        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        return default_from_dict(cls, data)

    @staticmethod
    def validate_input_parameters(expected: Dict[str, Any], received: Dict[str, Any]) -> None:
        """
        Validate the input parameters.

        :param expected:
            The expected input parameters.
        :param received:
            The received input parameters.

        :raises ValueError:
            If not all expected inputs are present in the received inputs
            If the received inputs are not lists or have different lengths
        """
        # Validate that all expected inputs are present in the received inputs
        for param in expected.keys():
            if param not in received:
                msg = f"LLM evaluator expected input parameter '{param}' but received only {received.keys()}."
                raise ValueError(msg)

        # Validate that all received inputs are lists
        if not all(isinstance(_input, list) for _input in received.values()):
            msg = (
                "LLM evaluator expects all input values to be lists but received "
                f"{[type(_input) for _input in received.values()]}."
            )
            raise ValueError(msg)

        # Validate that all received inputs are of the same length
        inputs = received.values()
        length = len(next(iter(inputs)))
        if not all(len(_input) == length for _input in inputs):
            msg = (
                f"LLM evaluator expects all input lists to have the same length but received {inputs} with lengths "
                f"{[len(_input) for _input in inputs]}."
            )
            raise ValueError(msg)

    def is_valid_json_and_has_expected_keys(self, expected: List[str], received: str) -> bool:
        """
        Output must be a valid JSON with the expected keys.

        :param expected:
            Names of expected outputs
        :param received:
            Names of received outputs

        :raises ValueError:
            If the output is not a valid JSON with the expected keys:
            - with `raise_on_failure` set to True a ValueError is raised.
            - with `raise_on_failure` set to False a warning is issued and False is returned.

        :returns:
            True if the received output is a valid JSON with the expected keys, False otherwise.
        """
        try:
            parsed_output = json.loads(received)
        except json.JSONDecodeError:
            msg = "Response from LLM evaluator is not a valid JSON."
            if self.raise_on_failure:
                raise ValueError(msg)
            warn(msg)
            return False

        if not all(output in parsed_output for output in expected):
            msg = f"Expected response from LLM evaluator to be JSON with keys {expected}, got {received}."
            if self.raise_on_failure:
                raise ValueError(msg)
            warn(msg)
            return False

        return True
|
testbed/deepset-ai__haystack/haystack/components/evaluators/sas_evaluator.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from numpy import mean as np_mean
|
| 8 |
+
|
| 9 |
+
from haystack import component, default_from_dict, default_to_dict
|
| 10 |
+
from haystack.lazy_imports import LazyImport
|
| 11 |
+
from haystack.utils import ComponentDevice, expit
|
| 12 |
+
from haystack.utils.auth import Secret, deserialize_secrets_inplace
|
| 13 |
+
|
| 14 |
+
with LazyImport(message="Run 'pip install \"sentence-transformers>=3.0.0\"'") as sas_import:
|
| 15 |
+
from sentence_transformers import CrossEncoder, SentenceTransformer, util
|
| 16 |
+
from transformers import AutoConfig
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@component
class SASEvaluator:
    """
    SASEvaluator computes the Semantic Answer Similarity (SAS) between a list of predictions and a list of
    ground truths.

    It's usually used in Retrieval Augmented Generation (RAG) pipelines to evaluate the quality of the generated
    answers. The SAS is computed using a pre-trained model from the Hugging Face model hub. The model can be either a
    Bi-Encoder or a Cross-Encoder. The choice of the model is based on the `model` parameter.

    Usage example:
    ```python
    from haystack.components.evaluators.sas_evaluator import SASEvaluator

    evaluator = SASEvaluator(model="cross-encoder/ms-marco-MiniLM-L-6-v2")
    evaluator.warm_up()
    ground_truths = [
        "A construction budget of US $2.3 billion",
        "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
        "The Meiji Restoration in 1868 transformed Japan into a modernized world power.",
    ]
    predictions = [
        "A construction budget of US $2.3 billion",
        "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
        "The Meiji Restoration in 1868 transformed Japan into a modernized world power.",
    ]
    result = evaluator.run(
        ground_truth_answers=ground_truths, predicted_answers=predictions
    )

    print(result["score"])
    # 0.9999673763910929

    print(result["individual_scores"])
    # [0.9999765157699585, 0.999968409538269, 0.9999572038650513]
    ```
    """

    def __init__(
        self,
        model: str = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
        batch_size: int = 32,
        device: Optional[ComponentDevice] = None,
        token: Secret = Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False),
    ):
        """
        Creates a new instance of SASEvaluator.

        :param model:
            SentenceTransformers semantic textual similarity model, should be path or string pointing to a downloadable
            model.
        :param batch_size:
            Number of prediction-label pairs to encode at once.
        :param device:
            The device on which the model is loaded. If `None`, the default device is automatically selected.
        :param token:
            The Hugging Face token for HTTP bearer authorization.
            You can find your HF token in your [account settings](https://huggingface.co/settings/tokens)
        """
        sas_import.check()

        self._model = model
        self._batch_size = batch_size
        self._device = device
        self._token = token
        # Lazily populated by warm_up(); either a SentenceTransformer (bi-encoder) or a CrossEncoder.
        self._similarity_model = None

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize this component to a dictionary.

        :returns:
            The serialized component as a dictionary.
        """
        return default_to_dict(
            self,
            model=self._model,
            batch_size=self._batch_size,
            device=self._device.to_dict() if self._device else None,
            token=self._token.to_dict() if self._token else None,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SASEvaluator":
        """
        Deserialize this component from a dictionary.

        :param data:
            The dictionary representation of this component.
        :returns:
            The deserialized component instance.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["token"])
        if device := data.get("init_parameters", {}).get("device"):
            data["init_parameters"]["device"] = ComponentDevice.from_dict(device)
        return default_from_dict(cls, data)

    def warm_up(self):
        """
        Initializes the component by loading the similarity model.

        Safe to call more than once; subsequent calls are no-ops.
        """
        if self._similarity_model:
            return

        token = self._token.resolve_value() if self._token else None
        # Inspect the model config to decide between a Cross-Encoder and a Bi-Encoder:
        # sequence-classification architectures indicate a Cross-Encoder.
        config = AutoConfig.from_pretrained(self._model, use_auth_token=token)
        cross_encoder_used = False
        if config.architectures:
            cross_encoder_used = any(arch.endswith("ForSequenceClassification") for arch in config.architectures)
        device = ComponentDevice.resolve_device(self._device).to_torch_str()
        # Based on the Model string we can load either Bi-Encoders or Cross Encoders.
        # Similarity computation changes for both approaches
        if cross_encoder_used:
            self._similarity_model = CrossEncoder(
                self._model,
                device=device,
                tokenizer_args={"use_auth_token": token},
                automodel_args={"use_auth_token": token},
            )
        else:
            self._similarity_model = SentenceTransformer(self._model, device=device, use_auth_token=token)

    @component.output_types(score=float, individual_scores=List[float])
    def run(self, ground_truth_answers: List[str], predicted_answers: List[str]) -> Dict[str, Any]:
        """
        SASEvaluator component run method.

        Run the SASEvaluator to compute the Semantic Answer Similarity (SAS) between a list of predicted answers
        and a list of ground truth answers. Both must be list of strings of same length.

        :param ground_truth_answers:
            A list of expected answers for each question.
        :param predicted_answers:
            A list of generated answers for each question.
        :returns:
            A dictionary with the following outputs:
            - `score`: Mean SAS score over all the predictions/ground-truth pairs.
            - `individual_scores`: A list of similarity scores for each prediction/ground-truth pair.
        :raises ValueError:
            If the input lists have different lengths or the predictions contain None values.
        :raises RuntimeError:
            If `warm_up()` has not been called before `run()`.
        """
        if len(ground_truth_answers) != len(predicted_answers):
            raise ValueError("The number of predictions and labels must be the same.")

        if any(answer is None for answer in predicted_answers):
            raise ValueError("Predicted answers must not contain None values.")

        if len(predicted_answers) == 0:
            return {"score": 0.0, "individual_scores": [0.0]}

        if not self._similarity_model:
            msg = "The model has not been initialized. Call warm_up() before running the evaluator."
            raise RuntimeError(msg)

        if isinstance(self._similarity_model, CrossEncoder):
            # For Cross Encoders we create a list of pairs of predictions and labels
            sentence_pairs = list(zip(predicted_answers, ground_truth_answers))
            similarity_scores = self._similarity_model.predict(
                sentence_pairs, batch_size=self._batch_size, convert_to_numpy=True
            )

            # All Cross Encoders do not return a set of logits scores that are normalized
            # We normalize scores if they are larger than 1
            if (similarity_scores > 1).any():
                similarity_scores = expit(similarity_scores)

            # Convert scores to list of floats from numpy array
            similarity_scores = similarity_scores.tolist()

        else:
            # For Bi-encoders we create embeddings separately for predictions and labels
            predictions_embeddings = self._similarity_model.encode(
                predicted_answers, batch_size=self._batch_size, convert_to_tensor=True
            )
            label_embeddings = self._similarity_model.encode(
                ground_truth_answers, batch_size=self._batch_size, convert_to_tensor=True
            )

            # Compute cosine-similarities
            similarity_scores = [
                float(util.cos_sim(p, l).cpu().numpy()) for p, l in zip(predictions_embeddings, label_embeddings)
            ]

        # Cast the numpy scalar to a plain float so the output matches the declared `score: float`
        # output type and serializes cleanly.
        sas_score = float(np_mean(similarity_scores))

        return {"score": sas_score, "individual_scores": similarity_scores}
|
testbed/deepset-ai__haystack/haystack/components/extractors/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.components.extractors.named_entity_extractor import (
|
| 6 |
+
NamedEntityAnnotation,
|
| 7 |
+
NamedEntityExtractor,
|
| 8 |
+
NamedEntityExtractorBackend,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
__all__ = ["NamedEntityExtractor", "NamedEntityExtractorBackend", "NamedEntityAnnotation"]
|
testbed/deepset-ai__haystack/haystack/components/extractors/named_entity_extractor.py
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from abc import ABC, abstractmethod
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from enum import Enum
|
| 9 |
+
from typing import Any, Dict, List, Optional, Union
|
| 10 |
+
|
| 11 |
+
from haystack import ComponentError, DeserializationError, Document, component, default_from_dict, default_to_dict
|
| 12 |
+
from haystack.lazy_imports import LazyImport
|
| 13 |
+
from haystack.utils.device import ComponentDevice
|
| 14 |
+
|
| 15 |
+
with LazyImport(message="Run 'pip install \"transformers[torch]\"'") as transformers_import:
|
| 16 |
+
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline
|
| 17 |
+
from transformers import Pipeline as HfPipeline
|
| 18 |
+
|
| 19 |
+
with LazyImport(message="Run 'pip install spacy'") as spacy_import:
|
| 20 |
+
import spacy
|
| 21 |
+
from spacy import Language as SpacyPipeline
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class NamedEntityExtractorBackend(Enum):
    """
    NLP backend to use for Named Entity Recognition.
    """

    #: Uses an Hugging Face model and pipeline.
    HUGGING_FACE = "hugging_face"

    #: Uses a spaCy model and pipeline.
    SPACY = "spacy"

    def __str__(self):
        return self.value

    @staticmethod
    def from_str(string: str) -> "NamedEntityExtractorBackend":
        """
        Convert a string to a NamedEntityExtractorBackend enum.
        """
        for backend in NamedEntityExtractorBackend:
            if backend.value == string:
                return backend
        supported = [e.value for e in NamedEntityExtractorBackend]
        raise ComponentError(
            f"Invalid backend '{string}' for named entity extractor. Supported backends are: {supported}"
        )
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@dataclass
class NamedEntityAnnotation:
    """
    Describes a single NER annotation.

    :param entity:
        Entity label.
    :param start:
        Start index of the entity in the document.
    :param end:
        End index of the entity in the document.
    :param score:
        Score calculated by the model.
    """

    # Entity label assigned by the backend (e.g. an entity group name from a HF pipeline).
    entity: str
    # Character offsets of the entity span within the document's content.
    start: int
    end: int
    # Model confidence; None for backends that don't report one (the spaCy backend omits it).
    score: Optional[float] = None
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@component
class NamedEntityExtractor:
    """
    Annotates named entities in a collection of documents.

    The component supports two backends: Hugging Face and spaCy. The
    former can be used with any sequence classification model from the
    [Hugging Face model hub](https://huggingface.co/models), while the
    latter can be used with any [spaCy model](https://spacy.io/models)
    that contains an NER component. Annotations are stored as metadata
    in the documents.

    Usage example:
    ```python
    from haystack import Document
    from haystack.components.extractors.named_entity_extractor import NamedEntityExtractor

    documents = [
        Document(content="I'm Merlin, the happy pig!"),
        Document(content="My name is Clara and I live in Berkeley, California."),
    ]
    extractor = NamedEntityExtractor(backend="hugging_face", model="dslim/bert-base-NER")
    extractor.warm_up()
    results = extractor.run(documents=documents)["documents"]
    annotations = [NamedEntityExtractor.get_stored_annotations(doc) for doc in results]
    print(annotations)
    ```
    """

    # Key under which the annotations are stored in each Document's metadata.
    _METADATA_KEY = "named_entities"

    def __init__(
        self,
        *,
        backend: Union[str, NamedEntityExtractorBackend],
        model: str,
        pipeline_kwargs: Optional[Dict[str, Any]] = None,
        device: Optional[ComponentDevice] = None,
    ) -> None:
        """
        Create a Named Entity extractor component.

        :param backend:
            Backend to use for NER.
        :param model:
            Name of the model or a path to the model on
            the local disk. Dependent on the backend.
        :param pipeline_kwargs:
            Keyword arguments passed to the pipeline. The
            pipeline can override these arguments. Dependent on the backend.
        :param device:
            The device on which the model is loaded. If `None`,
            the default device is automatically selected. If a
            device/device map is specified in `pipeline_kwargs`,
            it overrides this parameter (only applicable to the
            HuggingFace backend).
        :raises ComponentError:
            If the backend string is unknown.
        """

        if isinstance(backend, str):
            backend = NamedEntityExtractorBackend.from_str(backend)

        self._backend: _NerBackend
        self._warmed_up: bool = False
        device = ComponentDevice.resolve_device(device)

        if backend == NamedEntityExtractorBackend.HUGGING_FACE:
            self._backend = _HfBackend(model_name_or_path=model, device=device, pipeline_kwargs=pipeline_kwargs)
        elif backend == NamedEntityExtractorBackend.SPACY:
            self._backend = _SpacyBackend(model_name_or_path=model, device=device, pipeline_kwargs=pipeline_kwargs)
        else:
            # Defensive: only reachable if a new enum member is added without a matching branch.
            raise ComponentError(f"Unknown NER backend '{type(backend).__name__}' for extractor")

    def warm_up(self):
        """
        Initialize the component.

        :raises ComponentError:
            If the backend fails to initialize successfully.
        """
        if self._warmed_up:
            return

        try:
            self._backend.initialize()
            self._warmed_up = True
        except Exception as e:
            # Fixed: the f-string was previously missing the closing quote after the backend type.
            raise ComponentError(
                f"Named entity extractor with backend '{self._backend.type}' failed to initialize."
            ) from e

    @component.output_types(documents=List[Document])
    def run(self, documents: List[Document], batch_size: int = 1) -> Dict[str, Any]:
        """
        Annotate named entities in each document and store the annotations in the document's metadata.

        :param documents:
            Documents to process.
        :param batch_size:
            Batch size used for processing the documents.
        :returns:
            Processed documents.
        :raises RuntimeError:
            If the component was not warmed up.
        :raises ComponentError:
            If the backend fails to process a document.
        """
        if not self._warmed_up:
            msg = "The component NamedEntityExtractor was not warmed up. Call warm_up() before running the component."
            raise RuntimeError(msg)

        # Documents with no content are annotated against the empty string rather than skipped,
        # so the output annotation list stays aligned with the input documents.
        texts = [doc.content if doc.content is not None else "" for doc in documents]
        annotations = self._backend.annotate(texts, batch_size=batch_size)

        if len(annotations) != len(documents):
            raise ComponentError(
                "NER backend did not return the correct number of annotations; "
                f"got {len(annotations)} but expected {len(documents)}"
            )

        for doc, doc_annotations in zip(documents, annotations):
            doc.meta[self._METADATA_KEY] = doc_annotations

        return {"documents": documents}

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            backend=self._backend.type.name,
            model=self._backend.model_name,
            device=self._backend.device.to_dict(),
            pipeline_kwargs=self._backend._pipeline_kwargs,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "NamedEntityExtractor":
        """
        Deserializes the component from a dictionary.

        :param data:
            Dictionary to deserialize from.
        :returns:
            Deserialized component.
        :raises DeserializationError:
            If deserialization fails.
        """
        try:
            init_params = data["init_parameters"]
            if init_params.get("device") is not None:
                init_params["device"] = ComponentDevice.from_dict(init_params["device"])
            # to_dict() stores the enum member name, so look it up by name here.
            init_params["backend"] = NamedEntityExtractorBackend[init_params["backend"]]
            return default_from_dict(cls, data)
        except Exception as e:
            raise DeserializationError(f"Couldn't deserialize {cls.__name__} instance") from e

    @property
    def initialized(self) -> bool:
        """
        Returns if the extractor is ready to annotate text.
        """
        return self._backend.initialized

    @classmethod
    def get_stored_annotations(cls, document: Document) -> Optional[List[NamedEntityAnnotation]]:
        """
        Returns the document's named entity annotations stored in its metadata, if any.

        :param document:
            Document whose annotations are to be fetched.
        :returns:
            The stored annotations.
        """

        return document.meta.get(cls._METADATA_KEY)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class _NerBackend(ABC):
    """
    Abstract interface shared by the concrete NER backends.
    """

    def __init__(
        self,
        _type: NamedEntityExtractorBackend,
        device: ComponentDevice,
        pipeline_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__()

        if pipeline_kwargs is None:
            pipeline_kwargs = {}

        self._type = _type
        self._device = device
        self._pipeline_kwargs = pipeline_kwargs

    @property
    def device(self) -> ComponentDevice:
        """
        The device on which the backend's model is loaded.

        :returns:
            The device on which the backend's model is loaded.
        """
        return self._device

    @property
    def type(self) -> NamedEntityExtractorBackend:
        """
        Returns the type of the backend.
        """
        return self._type

    @abstractmethod
    def initialize(self):
        """
        Initializes the backend. This would usually entail loading models, pipelines, and so on.
        """

    @abstractmethod
    def annotate(self, texts: List[str], *, batch_size: int = 1) -> List[List[NamedEntityAnnotation]]:
        """
        Predict annotations for a collection of documents.

        :param texts:
            Raw texts to be annotated.
        :param batch_size:
            Size of text batches that are passed to the model.
        :returns:
            NER annotations, one list per input text.
        """

    @property
    @abstractmethod
    def initialized(self) -> bool:
        """
        Returns if the backend has been initialized, for example, ready to annotate text.
        """

    @property
    @abstractmethod
    def model_name(self) -> str:
        """
        Returns the model name or path on the local disk.
        """
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
class _HfBackend(_NerBackend):
    """
    NER backend built on a Hugging Face token-classification pipeline.
    """

    def __init__(
        self, *, model_name_or_path: str, device: ComponentDevice, pipeline_kwargs: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Construct a Hugging Face NER backend.

        :param model_name_or_path:
            Name of the model or a path to the Hugging Face model on the local disk.
        :param device:
            The device on which the model is loaded. If `None`, the default device is
            automatically selected. A device/device map specified in `pipeline_kwargs`
            overrides this parameter.
        :param pipeline_kwargs:
            Keyword arguments passed to the pipeline. The pipeline can override these arguments.
        """
        super().__init__(NamedEntityExtractorBackend.HUGGING_FACE, device, pipeline_kwargs)

        transformers_import.check()

        self._model_name_or_path = model_name_or_path
        self.tokenizer: Optional[AutoTokenizer] = None
        self.model: Optional[AutoModelForTokenClassification] = None
        self.pipeline: Optional[HfPipeline] = None

    def initialize(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self._model_name_or_path)
        self.model = AutoModelForTokenClassification.from_pretrained(self._model_name_or_path)

        # Defaults come first; user-supplied kwargs may only add keys, never override these.
        kwargs: Dict[str, Any] = {
            "task": "ner",
            "model": self.model,
            "tokenizer": self.tokenizer,
            "aggregation_strategy": "simple",
        }
        for key, value in self._pipeline_kwargs.items():
            kwargs.setdefault(key, value)
        # Device settings are applied last, also without overwriting explicit user choices.
        self.device.update_hf_kwargs(kwargs, overwrite=False)
        self.pipeline = pipeline(**kwargs)

    def annotate(self, texts: List[str], *, batch_size: int = 1) -> List[List[NamedEntityAnnotation]]:
        if not self.initialized:
            raise ComponentError("Hugging Face NER backend was not initialized - Did you call `warm_up()`?")

        assert self.pipeline is not None
        outputs = self.pipeline(texts, batch_size=batch_size)

        results: List[List[NamedEntityAnnotation]] = []
        for raw_annotations in outputs:
            per_text = []
            for raw in raw_annotations:
                # Aggregated pipeline outputs report "entity_group"; non-aggregated ones report "entity".
                label = raw["entity"] if "entity" in raw else raw["entity_group"]
                per_text.append(
                    NamedEntityAnnotation(entity=label, start=raw["start"], end=raw["end"], score=raw["score"])
                )
            results.append(per_text)
        return results

    @property
    def initialized(self) -> bool:
        # Ready when both model halves are loaded, or when a pipeline already exists.
        return (self.tokenizer is not None and self.model is not None) or self.pipeline is not None

    @property
    def model_name(self) -> str:
        return self._model_name_or_path
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
class _SpacyBackend(_NerBackend):
    """
    spaCy backend for NER.
    """

    def __init__(
        self, *, model_name_or_path: str, device: ComponentDevice, pipeline_kwargs: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Construct a spaCy NER backend.

        :param model_name_or_path:
            Name of the model or a path to the spaCy
            model on the local disk.
        :param device:
            The device on which the model is loaded. If `None`,
            the default device is automatically selected.
        :param pipeline_kwargs:
            Keyword arguments passed to the pipeline. The
            pipeline can override these arguments.
        :raises ValueError:
            If the resolved device maps to multiple devices.
        """
        super().__init__(NamedEntityExtractorBackend.SPACY, device, pipeline_kwargs)

        spacy_import.check()

        self._model_name_or_path = model_name_or_path
        self.pipeline: Optional[SpacyPipeline] = None

        # spaCy can only bind to a single device at a time (see _select_device below).
        if self.device.has_multiple_devices:
            raise ValueError("spaCy backend for named entity extractor only supports inference on single devices")

    def initialize(self):
        # We need to initialize the model on the GPU if needed.
        with self._select_device():
            self.pipeline = spacy.load(self._model_name_or_path)

        if not self.pipeline.has_pipe("ner"):
            raise ComponentError(f"spaCy pipeline '{self._model_name_or_path}' does not contain an NER component")

        # Disable unnecessary pipes.
        # The transformer/tok2vec pipes are kept because downstream NER components may depend
        # on the token representations they produce.
        pipes_to_keep = ("ner", "tok2vec", "transformer", "curated_transformer")
        for name in self.pipeline.pipe_names:
            if name not in pipes_to_keep:
                self.pipeline.disable_pipe(name)

        # Strip kwargs that would collide with the explicit arguments passed to pipe() in annotate().
        self._pipeline_kwargs = {k: v for k, v in self._pipeline_kwargs.items() if k not in ("texts", "batch_size")}

    def annotate(self, texts: List[str], *, batch_size: int = 1) -> List[List[NamedEntityAnnotation]]:
        """
        Annotate a batch of texts with the loaded spaCy pipeline.

        :param texts:
            Raw texts to be annotated.
        :param batch_size:
            Size of text batches passed to the model.
        :returns:
            One list of annotations per input text. Note that spaCy entities
            carry no confidence score, so `score` is left as None.
        :raises ComponentError:
            If the backend was not initialized.
        """
        if not self.initialized:
            raise ComponentError("spaCy NER backend was not initialized - Did you call `warm_up()`?")

        assert self.pipeline is not None
        # Inference must run on the same device the model was loaded on.
        with self._select_device():
            outputs = list(self.pipeline.pipe(texts=texts, batch_size=batch_size, **self._pipeline_kwargs))

        return [
            [
                NamedEntityAnnotation(entity=entity.label_, start=entity.start_char, end=entity.end_char)
                for entity in doc.ents
            ]
            for doc in outputs
        ]

    @property
    def initialized(self) -> bool:
        # The pipeline is only set after a successful initialize().
        return self.pipeline is not None

    @property
    def model_name(self) -> str:
        return self._model_name_or_path

    @contextmanager
    def _select_device(self):
        """
        Context manager used to run spaCy models on a specific GPU in a scoped manner.
        """

        # TODO: This won't restore the active device.
        # Since there are no opaque API functions to determine
        # the active device in spaCy/Thinc, we can't do much
        # about it as a consumer unless we start poking into their
        # internals.
        # NOTE(review): to_spacy() presumably returns a non-negative GPU id or a
        # negative value for CPU — confirm against ComponentDevice.
        device_id = self._device.to_spacy()
        try:
            if device_id >= 0:
                spacy.require_gpu(device_id)
            yield
        finally:
            if device_id >= 0:
                spacy.require_cpu()
|
testbed/deepset-ai__haystack/haystack/components/joiners/answer_joiner.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import itertools
|
| 6 |
+
from enum import Enum
|
| 7 |
+
from math import inf
|
| 8 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 9 |
+
|
| 10 |
+
from haystack import component, default_from_dict, default_to_dict, logging
|
| 11 |
+
from haystack.core.component.types import Variadic
|
| 12 |
+
from haystack.dataclasses.answer import ExtractedAnswer, ExtractedTableAnswer, GeneratedAnswer
|
| 13 |
+
|
| 14 |
+
AnswerType = Union[GeneratedAnswer, ExtractedTableAnswer, ExtractedAnswer]
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class JoinMode(Enum):
    """
    Enum for AnswerJoiner join modes.
    """

    CONCATENATE = "concatenate"

    def __str__(self):
        return self.value

    @staticmethod
    def from_str(string: str) -> "JoinMode":
        """
        Convert a string to a JoinMode enum.
        """
        # Scan the members directly instead of building a lookup dict.
        for member in JoinMode:
            if member.value == string:
                return member
        supported = [member.value for member in JoinMode]
        msg = f"Unknown join mode '{string}'. Supported modes in AnswerJoiner are: {supported}"
        raise ValueError(msg)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@component
class AnswerJoiner:
    """
    Merges multiple lists of `Answer` objects into a single list.

    Use this component to combine answers from different Generators into a single list.
    Currently, the component supports only one join mode: `CONCATENATE`.
    This mode concatenates multiple lists of answers into a single list.

    ### Usage example

    In this example, AnswerJoiner merges answers from two different Generators:

    ```python
    from haystack.components.builders import AnswerBuilder
    from haystack.components.joiners import AnswerJoiner

    from haystack.core.pipeline import Pipeline

    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.dataclasses import ChatMessage


    query = "What's Natural Language Processing?"
    messages = [ChatMessage.from_system("You are a helpful, respectful and honest assistant. Be super concise."),
                ChatMessage.from_user(query)]

    pipe = Pipeline()
    pipe.add_component("gpt-4o", OpenAIChatGenerator(model="gpt-4o"))
    pipe.add_component("llama", OpenAIChatGenerator(model="gpt-3.5-turbo"))
    pipe.add_component("aba", AnswerBuilder())
    pipe.add_component("abb", AnswerBuilder())
    pipe.add_component("joiner", AnswerJoiner())

    pipe.connect("gpt-4o.replies", "aba")
    pipe.connect("llama.replies", "abb")
    pipe.connect("aba.answers", "joiner")
    pipe.connect("abb.answers", "joiner")

    results = pipe.run(data={"gpt-4o": {"messages": messages},
                             "llama": {"messages": messages},
                             "aba": {"query": query},
                             "abb": {"query": query}})
    ```
    """

    def __init__(
        self,
        join_mode: Union[str, JoinMode] = JoinMode.CONCATENATE,
        top_k: Optional[int] = None,
        sort_by_score: bool = False,
    ):
        """
        Creates an AnswerJoiner component.

        :param join_mode:
            Specifies the join mode to use. Available modes:
            - `concatenate`: Concatenates multiple lists of Answers into a single list.
        :param top_k:
            The maximum number of Answers to return.
        :param sort_by_score:
            If `True`, sorts the answers by score in descending order.
            If an answer has no score (missing attribute or `None`), it is handled as
            if its score is -infinity.
        """
        if isinstance(join_mode, str):
            join_mode = JoinMode.from_str(join_mode)
        join_mode_functions: Dict[JoinMode, Callable[[List[List[AnswerType]]], List[AnswerType]]] = {
            JoinMode.CONCATENATE: self._concatenate
        }
        self.join_mode_function: Callable[[List[List[AnswerType]]], List[AnswerType]] = join_mode_functions[join_mode]
        self.join_mode = join_mode
        self.top_k = top_k
        self.sort_by_score = sort_by_score

    @component.output_types(answers=List[AnswerType])
    def run(self, answers: Variadic[List[AnswerType]], top_k: Optional[int] = None):
        """
        Joins multiple lists of Answers into a single list depending on the `join_mode` parameter.

        :param answers:
            Nested list of Answers to be merged.

        :param top_k:
            The maximum number of Answers to return. Overrides the instance's `top_k` if provided.

        :returns:
            A dictionary with the following keys:
            - `answers`: Merged list of Answers
        """
        answers_list = list(answers)
        join_function = self.join_mode_function
        output_answers: List[AnswerType] = join_function(answers_list)

        if self.sort_by_score:
            # Some Answer types have no `score` attribute, and `ExtractedAnswer.score`
            # is Optional and may be None. Both cases must sort as -infinity; comparing
            # None against a float would raise a TypeError inside sorted().
            def _sort_key(answer: AnswerType) -> float:
                score = getattr(answer, "score", None)
                return score if score is not None else -inf

            output_answers = sorted(output_answers, key=_sort_key, reverse=True)

        top_k = top_k or self.top_k
        if top_k:
            output_answers = output_answers[:top_k]
        return {"answers": output_answers}

    def _concatenate(self, answer_lists: List[List[AnswerType]]) -> List[AnswerType]:
        """
        Concatenate multiple lists of Answers, flattening them into a single list.

        :param answer_lists: List of lists of Answers to be flattened.
        """
        return list(itertools.chain.from_iterable(answer_lists))

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(self, join_mode=str(self.join_mode), top_k=self.top_k, sort_by_score=self.sort_by_score)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AnswerJoiner":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        return default_from_dict(cls, data)
|
testbed/deepset-ai__haystack/haystack/components/joiners/branch.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, Type
|
| 6 |
+
|
| 7 |
+
from haystack import component, default_from_dict, default_to_dict, logging
|
| 8 |
+
from haystack.core.component.types import GreedyVariadic
|
| 9 |
+
from haystack.utils import deserialize_type, serialize_type
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@component()
class BranchJoiner:
    """
    A component to join different branches of a pipeline into one single output.

    `BranchJoiner` accepts multiple data connections of the same type from other components and
    forwards the first value it receives through its single output, which can then feed any number
    of downstream components.

    Its primary role is closing loops in a pipeline: the two branches it joins are typically the one
    coming from the previous component and the one looping back. For instance, `BranchJoiner` could
    feed a component that evaluates errors, receiving one connection carrying the original data and
    another carrying corrected data when an error occurred. Either way, `BranchJoiner` sends
    (or re-sends, in a loop) the data onward. See "Usage example" below.

    It is also useful for reconciling branches that fan out of a decision or Classifier component.
    In a RAG pipeline, a "query language classifier" might route the query to one of several
    retrievers depending on the detected language. All retrievers should ideally feed a single
    `PromptBuilder`, but a `PromptBuilder` accepts only one input connection — so the retrievers are
    all connected to a `BranchJoiner`, whose single output connects to the `PromptBuilder` downstream.

    Usage example:

    ```python
    import json
    from typing import List

    from haystack import Pipeline
    from haystack.components.converters import OutputAdapter
    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.components.joiners import BranchJoiner
    from haystack.components.validators import JsonSchemaValidator
    from haystack.dataclasses import ChatMessage

    person_schema = {
        "type": "object",
        "properties": {
            "first_name": {"type": "string", "pattern": "^[A-Z][a-z]+$"},
            "last_name": {"type": "string", "pattern": "^[A-Z][a-z]+$"},
            "nationality": {"type": "string", "enum": ["Italian", "Portuguese", "American"]},
        },
        "required": ["first_name", "last_name", "nationality"]
    }

    # Initialize a pipeline
    pipe = Pipeline()

    # Add components to the pipeline
    pipe.add_component('joiner', BranchJoiner(List[ChatMessage]))
    pipe.add_component('fc_llm', OpenAIChatGenerator(model="gpt-4o-mini"))
    pipe.add_component('validator', JsonSchemaValidator(json_schema=person_schema))
    pipe.add_component('adapter', OutputAdapter("{{chat_message}}", List[ChatMessage])),
    # And connect them
    pipe.connect("adapter", "joiner")
    pipe.connect("joiner", "fc_llm")
    pipe.connect("fc_llm.replies", "validator.messages")
    pipe.connect("validator.validation_error", "joiner")

    result = pipe.run(data={"fc_llm": {"generation_kwargs": {"response_format": {"type": "json_object"}}},
                            "adapter": {"chat_message": [ChatMessage.from_user("Create json from Peter Parker")]}})

    print(json.loads(result["validator"]["validated"][0].content))


    >> {'first_name': 'Peter', 'last_name': 'Parker', 'nationality': 'American', 'name': 'Spider-Man', 'occupation':
    >> 'Superhero', 'age': 23, 'location': 'New York City'}
    ```

    Note that a `BranchJoiner` handles exactly one data type at a time. Here it is created for
    `List[ChatMessage]`, which fixes both the type it accepts from upstream components and the type
    it emits on its output.

    In the example, `BranchJoiner` receives a looped-back `List[ChatMessage]` from the
    `JsonSchemaValidator` and passes it back down to the `OpenAIChatGenerator` for re-generation.
    A pipeline may contain several such loopback connections; here the downstream component is a
    single `OpenAIChatGenerator`, but there could be more.
    """

    def __init__(self, type_: Type):
        """
        Create a `BranchJoiner` component.

        :param type_: The type of data that the `BranchJoiner` will receive from the upstream connected components and
            distribute to the downstream connected components.
        """
        self.type_ = type_
        # type_'s type can't be determined statically
        component.set_input_types(self, value=GreedyVariadic[type_])  # type: ignore
        component.set_output_types(self, value=type_)

    def to_dict(self):
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        serialized_type = serialize_type(self.type_)
        return default_to_dict(self, type_=serialized_type)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "BranchJoiner":
        """
        Deserializes the component from a dictionary.

        :param data:
            Dictionary to deserialize from.
        :returns:
            Deserialized component.
        """
        init_params = data["init_parameters"]
        init_params["type_"] = deserialize_type(init_params["type_"])
        return default_from_dict(cls, data)

    def run(self, **kwargs):
        """
        The run method of the `BranchJoiner` component.

        Multiplexes the input data from the upstream connected components and distributes it to the downstream connected
        components.

        :param **kwargs: The input data. Must be of the type declared in `__init__`.
        :return: A dictionary with the following keys:
            - `value`: The input data.
        """
        received = kwargs["value"]
        if len(received) != 1:
            raise ValueError(f"BranchJoiner expects only one input, but {len(received)} were received.")
        return {"value": received[0]}
|
testbed/deepset-ai__haystack/haystack/components/joiners/document_joiner.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import itertools
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
from enum import Enum
|
| 8 |
+
from math import inf
|
| 9 |
+
from typing import Any, Dict, List, Optional, Union
|
| 10 |
+
|
| 11 |
+
from haystack import Document, component, default_from_dict, default_to_dict, logging
|
| 12 |
+
from haystack.core.component.types import Variadic
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class JoinMode(Enum):
    """
    Enum for join mode.
    """

    CONCATENATE = "concatenate"
    MERGE = "merge"
    RECIPROCAL_RANK_FUSION = "reciprocal_rank_fusion"
    DISTRIBUTION_BASED_RANK_FUSION = "distribution_based_rank_fusion"

    def __str__(self):
        return self.value

    @staticmethod
    def from_str(string: str) -> "JoinMode":
        """
        Convert a string to a JoinMode enum.
        """
        # Scan the members directly instead of building a lookup dict.
        for member in JoinMode:
            if member.value == string:
                return member
        supported = [member.value for member in JoinMode]
        msg = f"Unknown join mode '{string}'. Supported modes in DocumentJoiner are: {supported}"
        raise ValueError(msg)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@component
class DocumentJoiner:
    """
    Joins multiple lists of documents into a single list.

    It supports different join modes:
    - concatenate: Keeps the highest-scored document in case of duplicates.
    - merge: Calculates a weighted sum of scores for duplicates and merges them.
    - reciprocal_rank_fusion: Merges and assigns scores based on reciprocal rank fusion.
    - distribution_based_rank_fusion: Merges and assigns scores based on scores distribution in each Retriever.

    ### Usage example:

    ```python
    document_store = InMemoryDocumentStore()
    p = Pipeline()
    p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="bm25_retriever")
    p.add_component(
        instance=SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"),
        name="text_embedder",
    )
    p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name="embedding_retriever")
    p.add_component(instance=DocumentJoiner(), name="joiner")
    p.connect("bm25_retriever", "joiner")
    p.connect("embedding_retriever", "joiner")
    p.connect("text_embedder", "embedding_retriever")
    query = "What is the capital of France?"
    p.run(data={"query": query})
    ```
    """

    def __init__(
        self,
        join_mode: Union[str, JoinMode] = JoinMode.CONCATENATE,
        weights: Optional[List[float]] = None,
        top_k: Optional[int] = None,
        sort_by_score: bool = True,
    ):
        """
        Creates a DocumentJoiner component.

        :param join_mode:
            Specifies the join mode to use. Available modes:
            - `concatenate`: Keeps the highest-scored document in case of duplicates.
            - `merge`: Calculates a weighted sum of scores for duplicates and merges them.
            - `reciprocal_rank_fusion`: Merges and assigns scores based on reciprocal rank fusion.
            - `distribution_based_rank_fusion`: Merges and assigns scores based on scores
            distribution in each Retriever.
        :param weights:
            Assign importance to each list of documents to influence how they're joined.
            This parameter is ignored for
            `concatenate` or `distribution_based_rank_fusion` join modes.
            Weight for each list of documents must match the number of inputs.
        :param top_k:
            The maximum number of documents to return.
        :param sort_by_score:
            If `True`, sorts the documents by score in descending order.
            If a document has no score, it is handled as if its score is -infinity.
        """
        if isinstance(join_mode, str):
            join_mode = JoinMode.from_str(join_mode)
        join_mode_functions = {
            JoinMode.CONCATENATE: self._concatenate,
            JoinMode.MERGE: self._merge,
            JoinMode.RECIPROCAL_RANK_FUSION: self._reciprocal_rank_fusion,
            JoinMode.DISTRIBUTION_BASED_RANK_FUSION: self._distribution_based_rank_fusion,
        }
        self.join_mode_function = join_mode_functions[join_mode]
        self.join_mode = join_mode
        # Normalize weights so they sum to 1; None means equal weighting at join time.
        self.weights = [float(i) / sum(weights) for i in weights] if weights else None
        self.top_k = top_k
        self.sort_by_score = sort_by_score

    @component.output_types(documents=List[Document])
    def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):
        """
        Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.

        :param documents:
            List of list of documents to be merged.
        :param top_k:
            The maximum number of documents to return. Overrides the instance's `top_k` if provided.

        :returns:
            A dictionary with the following keys:
            - `documents`: Merged list of Documents
        """
        document_lists = list(documents)
        output_documents = self.join_mode_function(document_lists)

        if self.sort_by_score:
            output_documents = sorted(
                output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True
            )
            if any(doc.score is None for doc in output_documents):
                logger.info(
                    "Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by "
                    "score, so those with score=None were sorted as if they had a score of -infinity."
                )

        # The per-call top_k takes precedence over the instance-level one.
        if top_k:
            output_documents = output_documents[:top_k]
        elif self.top_k:
            output_documents = output_documents[: self.top_k]

        return {"documents": output_documents}

    def _concatenate(self, document_lists: List[List[Document]]) -> List[Document]:
        """
        Concatenate multiple lists of Documents and return only the Document with the highest score for duplicates.
        """
        output = []
        docs_per_id = defaultdict(list)
        for doc in itertools.chain.from_iterable(document_lists):
            docs_per_id[doc.id].append(doc)
        for docs in docs_per_id.values():
            # A score of 0.0 is a valid score; only a missing (None) score sorts last.
            doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score is not None else -inf)
            output.append(doc_with_best_score)
        return output

    def _merge(self, document_lists: List[List[Document]]) -> List[Document]:
        """
        Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.
        """
        scores_map: dict = defaultdict(int)
        documents_map = {}
        weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)

        for documents, weight in zip(document_lists, weights):
            for doc in documents:
                scores_map[doc.id] += (doc.score if doc.score is not None else 0) * weight
                documents_map[doc.id] = doc

        for doc in documents_map.values():
            doc.score = scores_map[doc.id]

        return list(documents_map.values())

    def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:
        """
        Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.

        The constant k is set to 61 (60 was suggested by the original paper,
        plus 1 as python lists are 0-based and the paper used 1-based ranking).
        """
        k = 61

        scores_map: dict = defaultdict(int)
        documents_map = {}
        weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)

        # Calculate weighted reciprocal rank fusion score
        for documents, weight in zip(document_lists, weights):
            for rank, doc in enumerate(documents):
                scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)
                documents_map[doc.id] = doc

        # Normalize scores. Note: len(results) / k is the maximum possible score,
        # achieved by being ranked first in all doc lists with non-zero weight.
        for _id in scores_map:
            scores_map[_id] /= len(document_lists) / k

        for doc in documents_map.values():
            doc.score = scores_map[doc.id]

        return list(documents_map.values())

    def _distribution_based_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:
        """
        Merge multiple lists of Documents and assign scores based on Distribution-Based Score Fusion.

        (https://medium.com/plain-simple-software/distribution-based-score-fusion-dbsf-a-new-approach-to-vector-search-ranking-f87c37488b18)
        If a Document is in more than one retriever, the one with the highest score is used.
        """
        for documents in document_lists:
            if not documents:
                # An empty result list has no score distribution to normalize;
                # skipping it avoids a ZeroDivisionError on len(scores_list).
                continue

            scores_list = [doc.score if doc.score is not None else 0 for doc in documents]

            mean_score = sum(scores_list) / len(scores_list)
            std_dev = (sum((x - mean_score) ** 2 for x in scores_list) / len(scores_list)) ** 0.5
            min_score = mean_score - 3 * std_dev
            max_score = mean_score + 3 * std_dev
            delta_score = max_score - min_score

            for doc in documents:
                doc.score = (doc.score - min_score) / delta_score if delta_score != 0.0 else 0.0
                # if all docs have the same score delta_score is 0, the docs are uninformative for the query

        output = self._concatenate(document_lists=document_lists)

        return output

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            join_mode=str(self.join_mode),
            weights=self.weights,
            top_k=self.top_k,
            sort_by_score=self.sort_by_score,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentJoiner":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        return default_from_dict(cls, data)
|
testbed/deepset-ai__haystack/haystack/components/joiners/string_joiner.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import List
|
| 6 |
+
|
| 7 |
+
from haystack import component, logging
|
| 8 |
+
from haystack.core.component.types import Variadic
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@component
class StringJoiner:
    """
    Component to join strings from different components to a list of strings.

    ### Usage example

    ```python
    from haystack.components.joiners import StringJoiner
    from haystack.components.builders import PromptBuilder
    from haystack.core.pipeline import Pipeline

    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.dataclasses import ChatMessage

    string_1 = "What's Natural Language Processing?"
    string_2 = "What is life?"

    pipeline = Pipeline()
    pipeline.add_component("prompt_builder_1", PromptBuilder("Builder 1: {{query}}"))
    pipeline.add_component("prompt_builder_2", PromptBuilder("Builder 2: {{query}}"))
    pipeline.add_component("string_joiner", StringJoiner())

    pipeline.connect("prompt_builder_1.prompt", "string_joiner.strings")
    pipeline.connect("prompt_builder_2.prompt", "string_joiner.strings")

    print(pipeline.run(data={"prompt_builder_1": {"query": string_1}, "prompt_builder_2": {"query": string_2}}))

    >> {"string_joiner": {"strings": ["Builder 1: What's Natural Language Processing?", "Builder 2: What is life?"]}}
    ```
    """

    @component.output_types(strings=List[str])
    def run(self, strings: Variadic[str]):
        """
        Collects the incoming strings into a single list.

        :param strings:
            strings from different components

        :returns:
            A dictionary with the following keys:
            - `strings`: Merged list of strings
        """
        # Unpack the variadic input into a plain list of strings.
        return {"strings": [*strings]}
|
testbed/deepset-ai__haystack/haystack/components/retrievers/__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.components.retrievers.filter_retriever import FilterRetriever
|
| 6 |
+
from haystack.components.retrievers.in_memory.bm25_retriever import InMemoryBM25Retriever
|
| 7 |
+
from haystack.components.retrievers.in_memory.embedding_retriever import InMemoryEmbeddingRetriever
|
| 8 |
+
from haystack.components.retrievers.sentence_window_retriever import SentenceWindowRetriever
|
| 9 |
+
|
| 10 |
+
__all__ = ["FilterRetriever", "InMemoryEmbeddingRetriever", "InMemoryBM25Retriever", "SentenceWindowRetriever"]
|
testbed/deepset-ai__haystack/haystack/components/retrievers/sentence_window_retriever.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from haystack import Document, component, default_from_dict, default_to_dict
|
| 8 |
+
from haystack.document_stores.types import DocumentStore
|
| 9 |
+
from haystack.utils import deserialize_document_store_in_init_params_inplace
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@component
class SentenceWindowRetriever:
    """
    Retrieves documents adjacent to a given document in the Document Store.

    During indexing, documents are broken into smaller chunks, or sentences. When you submit a query,
    the Retriever fetches the most relevant sentence. To provide full context,
    SentenceWindowRetriever fetches a number of neighboring sentences before and after each
    relevant one. You can set this number with the `window_size` parameter.
    It uses `source_id` and `doc.meta['split_id']` to locate the surrounding documents.

    This component works with existing Retrievers, like BM25Retriever or
    EmbeddingRetriever. First, use a Retriever to find documents based on a query and then use
    SentenceWindowRetriever to get the surrounding documents for context.

    The SentenceWindowRetriever is compatible with the following DocumentStores:
    - [Astra](https://docs.haystack.deepset.ai/docs/astradocumentstore)
    - [Elasticsearch](https://docs.haystack.deepset.ai/docs/elasticsearch-document-store)
    - [OpenSearch](https://docs.haystack.deepset.ai/docs/opensearch-document-store)
    - [Pgvector](https://docs.haystack.deepset.ai/docs/pgvectordocumentstore)
    - [Pinecone](https://docs.haystack.deepset.ai/docs/pinecone-document-store)
    - [Qdrant](https://docs.haystack.deepset.ai/docs/qdrant-document-store)

    ### Usage example

    ```python
    from haystack import Document, Pipeline
    from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
    from haystack.components.retrievers import SentenceWindowRetriever
    from haystack.components.preprocessors import DocumentSplitter
    from haystack.document_stores.in_memory import InMemoryDocumentStore

    splitter = DocumentSplitter(split_length=10, split_overlap=5, split_by="word")
    text = (
        "This is a text with some words. There is a second sentence. And there is also a third sentence. "
        "It also contains a fourth sentence. And a fifth sentence. And a sixth sentence. And a seventh sentence"
    )
    doc = Document(content=text)
    docs = splitter.run([doc])
    doc_store = InMemoryDocumentStore()
    doc_store.write_documents(docs["documents"])


    rag = Pipeline()
    rag.add_component("bm25_retriever", InMemoryBM25Retriever(doc_store, top_k=1))
    rag.add_component("sentence_window_retriever", SentenceWindowRetriever(document_store=doc_store, window_size=2))
    rag.connect("bm25_retriever", "sentence_window_retriever")

    rag.run({'bm25_retriever': {"query":"third"}})

    >> {'sentence_window_retriever': {'context_windows': ['some words. There is a second sentence.
    >> And there is also a third sentence. It also contains a fourth sentence. And a fifth sentence. And a sixth
    >> sentence. And a'], 'context_documents': [[Document(...), Document(...), ...]]}}
    ```
    """

    def __init__(self, document_store: DocumentStore, window_size: int = 3):
        """
        Creates a new SentenceWindowRetriever component.

        :param document_store: The Document Store to retrieve the surrounding documents from.
        :param window_size: The number of documents to retrieve before and after the relevant one.
            For example, `window_size: 2` fetches 2 preceding and 2 following documents.
        :raises ValueError: If `window_size` is smaller than 1.
        """
        if window_size < 1:
            raise ValueError("The window_size parameter must be greater than 0.")

        self.window_size = window_size
        self.document_store = document_store

    @staticmethod
    def merge_documents_text(documents: List[Document]) -> str:
        """
        Merge a list of document text into a single string.

        This functions concatenates the textual content of a list of documents into a single string, eliminating any
        overlapping content.

        :param documents: List of Documents to merge.
        :returns: The merged, de-overlapped text of all documents, ordered by `split_idx_start`.
        """
        sorted_docs = sorted(documents, key=lambda doc: doc.meta["split_idx_start"])
        merged_text = ""
        last_idx_end = 0
        for doc in sorted_docs:
            start = doc.meta["split_idx_start"]  # start of the current content

            # if the start of the current content is before the end of the last appended content, adjust it
            start = max(start, last_idx_end)

            # append the non-overlapping part to the merged text
            merged_text += doc.content[start - doc.meta["split_idx_start"] :]  # type: ignore

            # update the last end index
            last_idx_end = doc.meta["split_idx_start"] + len(doc.content)  # type: ignore

        return merged_text

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        docstore = self.document_store.to_dict()
        return default_to_dict(self, document_store=docstore, window_size=self.window_size)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SentenceWindowRetriever":
        """
        Deserializes the component from a dictionary.

        :returns:
            Deserialized component.
        """
        # deserialize the document store
        deserialize_document_store_in_init_params_inplace(data)

        # deserialize the component
        return default_from_dict(cls, data)

    @component.output_types(context_windows=List[str], context_documents=List[List[Document]])
    def run(self, retrieved_documents: List[Document], window_size: Optional[int] = None):
        """
        Based on the `source_id` and on the `doc.meta['split_id']` get surrounding documents from the document store.

        Implements the logic behind the sentence-window technique, retrieving the surrounding documents of a given
        document from the document store.

        :param retrieved_documents: List of retrieved documents from the previous retriever.
        :param window_size: The number of documents to retrieve before and after the relevant one. This will overwrite
                            the `window_size` parameter set in the constructor.
        :returns:
            A dictionary with the following keys:
            - `context_windows`: A list of strings, where each string represents the concatenated text from the
                                 context window of the corresponding document in `retrieved_documents`.
            - `context_documents`: A list of lists of `Document` objects, where each inner list contains the
                                   documents that come from the context window for the corresponding document in
                                   `retrieved_documents`.
        :raises ValueError: If `window_size` is smaller than 1, or if the retrieved documents are missing the
            `split_id` or `source_id` metadata fields.
        """
        # `is not None` (not a truthiness check) so that an explicit, invalid `window_size=0`
        # is rejected below instead of silently falling back to the constructor value
        window_size = window_size if window_size is not None else self.window_size

        if window_size < 1:
            raise ValueError("The window_size parameter must be greater than 0.")

        if not all("split_id" in doc.meta for doc in retrieved_documents):
            raise ValueError("The retrieved documents must have 'split_id' in the metadata.")

        if not all("source_id" in doc.meta for doc in retrieved_documents):
            raise ValueError("The retrieved documents must have 'source_id' in the metadata.")

        context_text = []
        context_documents = []
        for doc in retrieved_documents:
            source_id = doc.meta["source_id"]
            split_id = doc.meta["split_id"]
            # bounds of the window: `window_size` splits on each side of the retrieved split
            min_before = split_id - window_size
            max_after = split_id + window_size
            context_docs = self.document_store.filter_documents(
                {
                    "operator": "AND",
                    "conditions": [
                        {"field": "meta.source_id", "operator": "==", "value": source_id},
                        {"field": "meta.split_id", "operator": ">=", "value": min_before},
                        {"field": "meta.split_id", "operator": "<=", "value": max_after},
                    ],
                }
            )
            context_text.append(self.merge_documents_text(context_docs))
            context_documents.append(context_docs)

        return {"context_windows": context_text, "context_documents": context_documents}
|
testbed/deepset-ai__haystack/haystack/components/routers/file_type_router.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import mimetypes
|
| 6 |
+
import re
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Any, Dict, List, Optional, Union
|
| 10 |
+
|
| 11 |
+
from haystack import component, default_from_dict, default_to_dict, logging
|
| 12 |
+
from haystack.components.converters.utils import get_bytestream_from_source, normalize_metadata
|
| 13 |
+
from haystack.dataclasses import ByteStream
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# we add markdown because it is not added by the mimetypes module
|
| 19 |
+
# see https://github.com/python/cpython/pull/17995
|
| 20 |
+
CUSTOM_MIMETYPES = {".md": "text/markdown", ".markdown": "text/markdown"}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@component
class FileTypeRouter:
    """
    Categorizes files or byte streams by their MIME types, helping in context-based routing.

    FileTypeRouter supports both exact MIME type matching and regex patterns.

    For file paths, MIME types come from extensions, while byte streams use metadata.
    You can use regex patterns in the `mime_types` parameter to set broad categories
    (such as 'audio/*' or 'text/*') or specific types.
    MIME types without regex patterns are treated as exact matches.

    ### Usage example

    ```python
    from haystack.components.routers import FileTypeRouter
    from pathlib import Path

    # For exact MIME type matching
    router = FileTypeRouter(mime_types=["text/plain", "application/pdf"])

    # For flexible matching using regex, to handle all audio types
    router_with_regex = FileTypeRouter(mime_types=[r"audio/.*", r"text/plain"])

    sources = [Path("file.txt"), Path("document.pdf"), Path("song.mp3")]
    print(router.run(sources=sources))
    print(router_with_regex.run(sources=sources))

    # Expected output:
    # {'text/plain': [
    #  PosixPath('file.txt')], 'application/pdf': [PosixPath('document.pdf')], 'unclassified': [PosixPath('song.mp3')
    #  ]}
    # {'audio/.*': [
    #  PosixPath('song.mp3')], 'text/plain': [PosixPath('file.txt')], 'unclassified': [PosixPath('document.pdf')
    #  ]}
    ```
    """

    def __init__(self, mime_types: List[str], additional_mimetypes: Optional[Dict[str, str]] = None):
        """
        Initialize the FileTypeRouter component.

        :param mime_types:
            A list of MIME types or regex patterns to classify the input files or byte streams.
            (for example: `["text/plain", "audio/x-wav", "image/jpeg"]`).

        :param additional_mimetypes:
            A dictionary containing the MIME type to add to the mimetypes package to prevent unsupported or non native
            packages from being unclassified.
            (for example: `{"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx"}`).

        :raises ValueError: If `mime_types` is empty or contains a string that is not a valid regex pattern.
        """
        if not mime_types:
            raise ValueError("The list of mime types cannot be empty.")

        if additional_mimetypes:
            for mime, ext in additional_mimetypes.items():
                mimetypes.add_type(mime, ext)

        self.mime_type_patterns = []
        for mime_type in mime_types:
            try:
                pattern = re.compile(mime_type)
            except re.error as e:
                # chain the original regex error so the root cause is preserved in the traceback
                raise ValueError(f"Invalid regex pattern '{mime_type}'.") from e
            self.mime_type_patterns.append(pattern)

        # the actual output type is List[Union[Path, ByteStream]],
        # but this would cause PipelineConnectError with Converters
        component.set_output_types(
            self,
            unclassified=List[Union[str, Path, ByteStream]],
            **{mime_type: List[Union[str, Path, ByteStream]] for mime_type in mime_types},
        )
        self.mime_types = mime_types
        self._additional_mimetypes = additional_mimetypes

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(self, mime_types=self.mime_types, additional_mimetypes=self._additional_mimetypes)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "FileTypeRouter":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        return default_from_dict(cls, data)

    def run(
        self,
        sources: List[Union[str, Path, ByteStream]],
        meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    ) -> Dict[str, List[Union[ByteStream, Path]]]:
        """
        Categorize files or byte streams according to their MIME types.

        :param sources:
            A list of file paths or byte streams to categorize.

        :param meta:
            Optional metadata to attach to the sources.
            When provided, the sources are internally converted to ByteStream objects and the metadata is added.
            This value can be a list of dictionaries or a single dictionary.
            If it's a single dictionary, its content is added to the metadata of all ByteStream objects.
            If it's a list, its length must match the number of sources, as they are zipped together.

        :returns: A dictionary where the keys are MIME types (or `"unclassified"`) and the values are lists of data
            sources.
        :raises ValueError: If a source is neither a string, a `Path`, nor a `ByteStream`.
        """

        mime_types = defaultdict(list)
        meta_list = normalize_metadata(meta=meta, sources_count=len(sources))

        for source, meta_dict in zip(sources, meta_list):
            if isinstance(source, str):
                source = Path(source)

            if isinstance(source, Path):
                mime_type = self._get_mime_type(source)
            elif isinstance(source, ByteStream):
                mime_type = source.mime_type
            else:
                raise ValueError(f"Unsupported data source type: {type(source).__name__}")

            # If we have metadata, we convert the source to ByteStream and add the metadata
            if meta_dict:
                source = get_bytestream_from_source(source)
                source.meta.update(meta_dict)

            matched = False
            if mime_type:
                # first pattern wins: routing follows the order of the `mime_types` constructor argument
                for pattern in self.mime_type_patterns:
                    if pattern.fullmatch(mime_type):
                        mime_types[pattern.pattern].append(source)
                        matched = True
                        break
            if not matched:
                mime_types["unclassified"].append(source)

        return dict(mime_types)

    def _get_mime_type(self, path: Path) -> Optional[str]:
        """
        Get the MIME type of the provided file path.

        :param path: The file path to get the MIME type for.

        :returns: The MIME type of the provided file path, or `None` if the MIME type cannot be determined.
        """
        extension = path.suffix.lower()
        mime_type = mimetypes.guess_type(path.as_posix())[0]
        # lookup custom mappings if the mime type is not found
        return CUSTOM_MIMETYPES.get(extension, mime_type)
|
testbed/deepset-ai__haystack/haystack/components/routers/text_language_router.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from haystack import component, logging
|
| 8 |
+
from haystack.lazy_imports import LazyImport
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
with LazyImport("Run 'pip install langdetect'") as langdetect_import:
|
| 13 |
+
import langdetect
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@component
class TextLanguageRouter:
    """
    Routes text strings to different output connections based on their language.

    Provide a list of languages during initialization. If the document's text doesn't match any of the
    specified languages, the metadata value is set to "unmatched".
    For routing documents based on their language, use the DocumentLanguageClassifier component,
    followed by the MetaDataRouter.

    ### Usage example

    ```python
    from haystack import Pipeline, Document
    from haystack.components.routers import TextLanguageRouter
    from haystack.document_stores.in_memory import InMemoryDocumentStore
    from haystack.components.retrievers.in_memory import InMemoryBM25Retriever

    document_store = InMemoryDocumentStore()
    document_store.write_documents([Document(content="Elvis Presley was an American singer and actor.")])

    p = Pipeline()
    p.add_component(instance=TextLanguageRouter(languages=["en"]), name="text_language_router")
    p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="retriever")
    p.connect("text_language_router.en", "retriever.query")

    result = p.run({"text_language_router": {"text": "Who was Elvis Presley?"}})
    assert result["retriever"]["documents"][0].content == "Elvis Presley was an American singer and actor."

    result = p.run({"text_language_router": {"text": "ένα ελληνικό κείμενο"}})
    assert result["text_language_router"]["unmatched"] == "ένα ελληνικό κείμενο"
    ```
    """

    def __init__(self, languages: Optional[List[str]] = None):
        """
        Initialize the TextLanguageRouter component.

        :param languages: A list of ISO language codes.
        See the supported languages in [`langdetect` documentation](https://github.com/Mimino666/langdetect#languages).
        If not specified, defaults to ["en"].
        """
        langdetect_import.check()
        self.languages = languages or ["en"]
        # one dynamic output socket per language, plus a catch-all "unmatched" socket
        component.set_output_types(self, unmatched=str, **{lang: str for lang in self.languages})

    def run(self, text: str) -> Dict[str, str]:
        """
        Routes the text strings to different output connections based on their language.

        If the document's text doesn't match any of the specified languages, the metadata value is set to "unmatched".

        :param text: A text string to route.

        :returns: A dictionary in which the key is the language (or `"unmatched"`),
            and the value is the text.

        :raises TypeError: If the input is not a string.
        """
        if not isinstance(text, str):
            raise TypeError(
                "TextLanguageRouter expects a string as input. In case you want to classify a document, please use "
                "the DocumentLanguageClassifier and MetaDataRouter."
            )

        detected = self._detect_language(text)
        route = detected if detected in self.languages else "unmatched"
        return {route: text}

    def _detect_language(self, text: str) -> Optional[str]:
        # Returns the ISO code langdetect assigns, or None if detection fails.
        try:
            return langdetect.detect(text)
        except langdetect.LangDetectException as exception:
            logger.warning("Langdetect cannot detect the language of text. Error: {error}", error=exception)
            # Only log the text in debug mode, as it might contain sensitive information
            logger.debug("Langdetect cannot detect the language of text: {text}", text=text)
            return None
|
testbed/deepset-ai__haystack/haystack/components/samplers/top_p.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from haystack import Document, component, logging
|
| 8 |
+
from haystack.lazy_imports import LazyImport
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
with LazyImport(message="Run 'pip install \"torch>=1.13\"'") as torch_import:
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@component
|
| 18 |
+
class TopPSampler:
|
| 19 |
+
"""
|
| 20 |
+
Implements top-p (nucleus) sampling for document filtering based on cumulative probability scores.
|
| 21 |
+
|
| 22 |
+
This component provides functionality to filter a list of documents by selecting those whose scores fall
|
| 23 |
+
within the top 'p' percent of the cumulative distribution. It is useful for focusing on high-probability
|
| 24 |
+
documents while filtering out less relevant ones based on their assigned scores.
|
| 25 |
+
|
| 26 |
+
Usage example:
|
| 27 |
+
|
| 28 |
+
```python
|
| 29 |
+
from haystack import Document
|
| 30 |
+
from haystack.components.samplers import TopPSampler
|
| 31 |
+
|
| 32 |
+
sampler = TopPSampler(top_p=0.95, score_field="similarity_score")
|
| 33 |
+
docs = [
|
| 34 |
+
Document(content="Berlin", meta={"similarity_score": -10.6}),
|
| 35 |
+
Document(content="Belgrade", meta={"similarity_score": -8.9}),
|
| 36 |
+
Document(content="Sarajevo", meta={"similarity_score": -4.6}),
|
| 37 |
+
]
|
| 38 |
+
output = sampler.run(documents=docs)
|
| 39 |
+
docs = output["documents"]
|
| 40 |
+
assert len(docs) == 1
|
| 41 |
+
assert docs[0].content == "Sarajevo"
|
| 42 |
+
```
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
def __init__(self, top_p: float = 1.0, score_field: Optional[str] = None, min_top_k: Optional[int] = None):
|
| 46 |
+
"""
|
| 47 |
+
Creates an instance of TopPSampler.
|
| 48 |
+
|
| 49 |
+
:param top_p: Float between 0 and 1 representing the cumulative probability threshold for document selection.
|
| 50 |
+
A value of 1.0 indicates no filtering (all documents are retained).
|
| 51 |
+
:param score_field: Name of the field in each document's metadata that contains the score. If None, the default
|
| 52 |
+
document score field is used.
|
| 53 |
+
:param min_top_k: If specified, the minimum number of documents to return. If the top_p selects
|
| 54 |
+
fewer documents, additional ones with the next highest scores are added to the selection.
|
| 55 |
+
"""
|
| 56 |
+
torch_import.check()
|
| 57 |
+
|
| 58 |
+
self.top_p = top_p
|
| 59 |
+
if not 0 <= top_p <= 1:
|
| 60 |
+
raise ValueError(f"top_p must be between 0 and 1. Got {top_p}.")
|
| 61 |
+
self.score_field = score_field
|
| 62 |
+
self.min_top_k = min_top_k
|
| 63 |
+
|
| 64 |
+
@component.output_types(documents=List[Document])
def run(self, documents: List[Document], top_p: Optional[float] = None):
    """
    Filters documents using top-p sampling based on their scores.

    If the specified top_p results in no documents being selected (especially in cases of a low top_p value), the
    method returns the document with the highest score.

    :param documents: List of Document objects to be filtered.
    :param top_p: If specified, a float to override the cumulative probability threshold set during initialization.

    :returns: A dictionary with the following key:
        - `documents`: List of Document objects that have been selected based on the top-p sampling.
    :raises ValueError: If the top_p value is not within the range [0, 1].
    """
    if not documents:
        return {"documents": []}

    # `top_p or self.top_p` would silently discard an explicit top_p of 0.0
    # (0.0 is falsy), so test for None instead.
    top_p = self.top_p if top_p is None else top_p
    if not 0 <= top_p <= 1:
        raise ValueError(f"top_p must be between 0 and 1. Got {top_p}.")

    documents_with_scores, scores = self._get_documents_and_scores(documents)
    if len(documents_with_scores) == 0:
        logger.warning("No documents with scores found. Returning the original documents.")
        return {"documents": documents}

    sorted_docs_with_scores = sorted(zip(documents_with_scores, scores), key=lambda x: x[1], reverse=True)
    sorted_documents, sorted_scores = [list(t) for t in zip(*sorted_docs_with_scores)]

    tensor_scores = torch.tensor(sorted_scores, dtype=torch.float32)
    probs = torch.nn.functional.softmax(tensor_scores, dim=-1)
    cumulative_probs = torch.cumsum(probs, dim=-1)

    # Check if the cumulative probabilities are close to top_p with a 1e-6 tolerance
    close_to_top_p = torch.isclose(cumulative_probs, torch.tensor(top_p, device=cumulative_probs.device), atol=1e-6)

    # Combine the close_to_top_p with original condition using logical OR
    condition = (cumulative_probs <= top_p) | close_to_top_p

    # `condition` is already a boolean tensor; the original wrapped it in
    # torch.BoolTensor(...), which is deprecated and forces a CPU copy
    # (breaking on non-CPU tensors). Use the tensor directly.
    top_p_indices = torch.where(condition)[0]

    # Map the selected indices back to the sorted document list
    selected_docs = [sorted_documents[i.item()] for i in top_p_indices]

    # Top up the selection with the next-highest-scoring documents if needed.
    if self.min_top_k and len(selected_docs) < self.min_top_k:
        selected_docs = sorted_documents[: self.min_top_k]

    # If low p resulted in no documents being selected, then return at least one document
    if len(selected_docs) == 0:
        logger.warning(
            "Top-p sampling with p={top_p} resulted in no documents being selected. "
            "Returning the document with the highest score.",
            top_p=top_p,
        )
        selected_docs = [sorted_documents[0]]

    return {"documents": selected_docs}
|
| 123 |
+
|
| 124 |
+
@staticmethod
def _get_doc_score(doc: Document, score_field: Optional[str] = None) -> Optional[float]:
    """
    Look up a document's score.

    :param doc: Document object.
    :param score_field: Name of the field in the document's metadata that contains the score.
        If None, the document score field is used.

    :return: Score of the document, or None when no float score is available.
    """
    raw_score = doc.meta.get(score_field) if score_field else doc.score
    # Anything that is not a plain float (including a missing value) counts as "no score".
    return raw_score if isinstance(raw_score, float) else None
|
| 143 |
+
|
| 144 |
+
def _get_documents_and_scores(self, documents: List[Document]) -> Tuple[List[Document], List[float]]:
    """
    Split out the documents that carry a usable score and collect those scores.

    Documents without a valid score are logged (by ID) and excluded from the result.

    :param documents: List of Documents.
    :return: Tuple of (documents that have scores, their scores in the same order).
    """
    scored_docs: List[Document] = []
    collected_scores: List[float] = []
    unscored_docs: List[Document] = []

    for document in documents:
        doc_score = self._get_doc_score(doc=document, score_field=self.score_field)
        if doc_score is None:
            unscored_docs.append(document)
        else:
            scored_docs.append(document)
            collected_scores.append(doc_score)

    if unscored_docs:
        missing_scores_docs_ids = [d.id for d in unscored_docs if d.id]
        if self.score_field:
            logger.warning(
                "Score field '{score_field}' not found in metadata of documents with IDs: {doc_ids}."
                "Make sure that all documents have a score field '{score_field_2}' in their metadata.",
                score_field=self.score_field,
                doc_ids=",".join(missing_scores_docs_ids),
                score_field_2=self.score_field,
            )
        else:
            logger.warning(
                "Ensure all documents have a valid score value. These documents {doc_ids} are missing scores.",
                doc_ids=",".join(missing_scores_docs_ids),
            )
    return scored_docs, collected_scores
|
testbed/deepset-ai__haystack/haystack/components/validators/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.components.validators.json_schema import JsonSchemaValidator
|
| 6 |
+
|
| 7 |
+
__all__ = ["JsonSchemaValidator"]
|
testbed/deepset-ai__haystack/haystack/components/validators/json_schema.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from typing import Any, Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
from haystack import component
|
| 9 |
+
from haystack.dataclasses import ChatMessage
|
| 10 |
+
from haystack.lazy_imports import LazyImport
|
| 11 |
+
|
| 12 |
+
with LazyImport(message="Run 'pip install jsonschema'") as jsonschema_import:
|
| 13 |
+
from jsonschema import ValidationError, validate
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def is_valid_json(s: str) -> bool:
    """
    Check if the provided string is a valid JSON.

    :param s: The string to be checked.
    :returns: `True` if the string is a valid JSON; otherwise, `False`.
    """
    try:
        json.loads(s)
        return True
    except ValueError:
        # json.loads raises json.JSONDecodeError, a subclass of ValueError.
        return False
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@component
class JsonSchemaValidator:
    """
    Validates JSON content of `ChatMessage` against a specified [JSON Schema](https://json-schema.org/).

    If JSON content of a message conforms to the provided schema, the message is passed along the "validated" output.
    If the JSON content does not conform to the schema, the message is passed along the "validation_error" output.
    In the latter case, the error message is constructed using the provided `error_template` or a default template.
    These error ChatMessages can be used by LLMs in Haystack 2.x recovery loops.

    Usage example:

    ```python
    from typing import List

    from haystack import Pipeline
    from haystack.components.generators.chat import OpenAIChatGenerator
    from haystack.components.joiners import BranchJoiner
    from haystack.components.validators import JsonSchemaValidator
    from haystack import component
    from haystack.dataclasses import ChatMessage


    @component
    class MessageProducer:

        @component.output_types(messages=List[ChatMessage])
        def run(self, messages: List[ChatMessage]) -> dict:
            return {"messages": messages}


    p = Pipeline()
    p.add_component("llm", OpenAIChatGenerator(model="gpt-4-1106-preview",
                    generation_kwargs={"response_format": {"type": "json_object"}}))
    p.add_component("schema_validator", JsonSchemaValidator())
    p.add_component("joiner_for_llm", BranchJoiner(List[ChatMessage]))
    p.add_component("message_producer", MessageProducer())

    p.connect("message_producer.messages", "joiner_for_llm")
    p.connect("joiner_for_llm", "llm")
    p.connect("llm.replies", "schema_validator.messages")
    p.connect("schema_validator.validation_error", "joiner_for_llm")

    result = p.run(data={
        "message_producer": {
            "messages":[ChatMessage.from_user("Generate JSON for person with name 'John' and age 30")]},
        "schema_validator": {
            "json_schema": {
                "type": "object",
                "properties": {"name": {"type": "string"},
                               "age": {"type": "integer"}
                }
            }
        }
    })
    print(result)
    >> {'schema_validator': {'validated': [ChatMessage(content='\\n{\\n  "name": "John",\\n  "age": 30\\n}',
    role=<ChatRole.ASSISTANT: 'assistant'>, name=None, meta={'model': 'gpt-4-1106-preview', 'index': 0,
    'finish_reason': 'stop', 'usage': {'completion_tokens': 17, 'prompt_tokens': 20, 'total_tokens': 37}})]}}
    ```
    """

    # Default error description template. Placeholders are filled by
    # _construct_error_recovery_message via str.format.
    default_error_template = (
        "The following generated JSON does not conform to the provided schema.\n"
        "Generated JSON: {failing_json}\n"
        "Error details:\n- Message: {error_message}\n"
        "- Error Path in JSON: {error_path}\n"
        "- Schema Path: {error_schema_path}\n"
        "Please match the following schema:\n"
        "{json_schema}\n"
        "and provide the corrected JSON content ONLY. Please do not output anything else than the raw corrected "
        "JSON string, this is the most important part of the task. Don't use any markdown and don't add any comment."
    )

    def __init__(self, json_schema: Optional[Dict[str, Any]] = None, error_template: Optional[str] = None):
        """
        Initialize the JsonSchemaValidator component.

        :param json_schema: A dictionary representing the [JSON schema](https://json-schema.org/) against which
            the messages' content is validated.
        :param error_template: A custom template string for formatting the error message in case of validation failure.
        """
        # Fails fast if the optional `jsonschema` dependency is not installed.
        jsonschema_import.check()
        self.json_schema = json_schema
        self.error_template = error_template

    @component.output_types(validated=List[ChatMessage], validation_error=List[ChatMessage])
    def run(
        self,
        messages: List[ChatMessage],
        json_schema: Optional[Dict[str, Any]] = None,
        error_template: Optional[str] = None,
    ) -> Dict[str, List[ChatMessage]]:
        """
        Validates the last of the provided messages against the specified json schema.

        If it does, the message is passed along the "validated" output. If it does not, the message is passed along
        the "validation_error" output.

        :param messages: A list of ChatMessage instances to be validated. The last message in this list is the one
            that is validated.
        :param json_schema: A dictionary representing the [JSON schema](https://json-schema.org/)
            against which the messages' content is validated. If not provided, the schema from the component init
            is used.
        :param error_template: A custom template string for formatting the error message in case of validation. If not
            provided, the `error_template` from the component init is used.
        :return: A dictionary with the following keys:
            - "validated": A list of messages if the last message is valid.
            - "validation_error": A list of messages if the last message is invalid.
        :raises ValueError: If no JSON schema is provided or if the message content is not a dictionary or a list of
            dictionaries.
        """
        # Only the most recent message is validated; earlier messages are context.
        last_message = messages[-1]
        if not is_valid_json(last_message.content):
            # Content is not even parseable JSON: short-circuit with a recovery prompt.
            return {
                "validation_error": [
                    ChatMessage.from_user(
                        f"The message '{last_message.content}' is not a valid JSON object. "
                        f"Please provide only a valid JSON object in string format."
                        f"Don't use any markdown and don't add any comment."
                    )
                ]
            }

        last_message_content = json.loads(last_message.content)
        # Run-time arguments take precedence over the init-time configuration.
        json_schema = json_schema or self.json_schema
        error_template = error_template or self.error_template or self.default_error_template

        if not json_schema:
            raise ValueError("Provide a JSON schema for validation either in the run method or in the component init.")
        # fc payload is json object but subtree `parameters` is string - we need to convert to json object
        # we need complete json to validate it against schema
        last_message_json = self._recursive_json_to_object(last_message_content)
        using_openai_schema: bool = self._is_openai_function_calling_schema(json_schema)
        if using_openai_schema:
            # OpenAI function-calling schemas nest the actual JSON schema under "parameters".
            validation_schema = json_schema["parameters"]
        else:
            validation_schema = json_schema
        try:
            # Normalize to a list so single objects and arrays share one code path.
            last_message_json = [last_message_json] if not isinstance(last_message_json, list) else last_message_json
            for content in last_message_json:
                if using_openai_schema:
                    validate(instance=content["function"]["arguments"], schema=validation_schema)
                else:
                    validate(instance=content, schema=validation_schema)

            return {"validated": [last_message]}
        except ValidationError as e:
            error_path = " -> ".join(map(str, e.absolute_path)) if e.absolute_path else "N/A"
            error_schema_path = " -> ".join(map(str, e.absolute_schema_path)) if e.absolute_schema_path else "N/A"

            error_template = error_template or self.default_error_template

            recovery_prompt = self._construct_error_recovery_message(
                error_template,
                str(e),
                error_path,
                error_schema_path,
                validation_schema,
                failing_json=last_message.content,
            )
            return {"validation_error": [ChatMessage.from_user(recovery_prompt)]}

    def _construct_error_recovery_message(
        self,
        error_template: str,
        error_message: str,
        error_path: str,
        error_schema_path: str,
        json_schema: Dict[str, Any],
        failing_json: str,
    ) -> str:
        """
        Constructs an error recovery message using a specified template or the default one if none is provided.

        :param error_template: A custom template string for formatting the error message in case of validation failure.
        :param error_message: The error message returned by the JSON schema validator.
        :param error_path: The path in the JSON content where the error occurred.
        :param error_schema_path: The path in the JSON schema where the error occurred.
        :param json_schema: The JSON schema against which the content is validated.
        :param failing_json: The generated invalid JSON string.
        """
        error_template = error_template or self.default_error_template

        return error_template.format(
            error_message=error_message,
            error_path=error_path,
            error_schema_path=error_schema_path,
            json_schema=json_schema,
            failing_json=failing_json,
        )

    def _is_openai_function_calling_schema(self, json_schema: Dict[str, Any]) -> bool:
        """
        Checks if the provided schema is a valid OpenAI function calling schema.

        :param json_schema: The JSON schema to check
        :return: `True` if the schema is a valid OpenAI function calling schema; otherwise, `False`.
        """
        return all(key in json_schema for key in ["name", "description", "parameters"])

    def _recursive_json_to_object(self, data: Any) -> Any:
        """
        Convert any string values that are valid JSON objects into dictionary objects.

        Returns a new data structure.

        :param data: The data structure to be traversed.
        :return: A new data structure with JSON strings converted to dictionary objects.
        :raises ValueError: If `data` is neither a list nor a dictionary.
        """
        if isinstance(data, list):
            return [self._recursive_json_to_object(item) for item in data]

        if isinstance(data, dict):
            new_dict = {}
            for key, value in data.items():
                if isinstance(value, str):
                    try:
                        json_value = json.loads(value)
                        if isinstance(json_value, (dict, list)):
                            new_dict[key] = self._recursive_json_to_object(json_value)
                        else:
                            new_dict[key] = value  # Preserve the original string value
                    except json.JSONDecodeError:
                        new_dict[key] = value
                elif isinstance(value, dict):
                    new_dict[key] = self._recursive_json_to_object(value)
                else:
                    new_dict[key] = value
            return new_dict

        # Top-level input must be a dict or a list: anything else is rejected.
        raise ValueError("Input must be a dictionary or a list of dictionaries.")
|
testbed/deepset-ai__haystack/haystack/components/websearch/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.components.websearch.searchapi import SearchApiWebSearch
|
| 6 |
+
from haystack.components.websearch.serper_dev import SerperDevWebSearch
|
| 7 |
+
|
| 8 |
+
__all__ = ["SerperDevWebSearch", "SearchApiWebSearch"]
|
testbed/deepset-ai__haystack/haystack/components/websearch/searchapi.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Optional, Union
|
| 6 |
+
|
| 7 |
+
import requests
|
| 8 |
+
|
| 9 |
+
from haystack import ComponentError, Document, component, default_from_dict, default_to_dict, logging
|
| 10 |
+
from haystack.utils import Secret, deserialize_secrets_inplace
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
SEARCHAPI_BASE_URL = "https://www.searchapi.io/api/v1/search"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Raised when a SearchApi request fails for a reason other than a timeout.
class SearchApiError(ComponentError): ...
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@component
class SearchApiWebSearch:
    """
    Uses [SearchApi](https://www.searchapi.io/) to search the web for relevant documents.

    Usage example:
    ```python
    from haystack.components.websearch import SearchApiWebSearch
    from haystack.utils import Secret

    websearch = SearchApiWebSearch(top_k=10, api_key=Secret.from_token("test-api-key"))
    results = websearch.run(query="Who is the boyfriend of Olivia Wilde?")

    assert results["documents"]
    assert results["links"]
    ```
    """

    def __init__(
        self,
        api_key: Secret = Secret.from_env_var("SEARCHAPI_API_KEY"),
        top_k: Optional[int] = 10,
        allowed_domains: Optional[List[str]] = None,
        search_params: Optional[Dict[str, Any]] = None,
    ):
        """
        Initialize the SearchApiWebSearch component.

        :param api_key: API key for the SearchApi API
        :param top_k: Number of documents to return.
        :param allowed_domains: List of domains to limit the search to.
        :param search_params: Additional parameters passed to the SearchApi API.
            For example, you can set 'num' to 100 to increase the number of search results.
            See the [SearchApi website](https://www.searchapi.io/) for more details.

            The default search engine is Google, however, users can change it by setting the `engine`
            parameter in the `search_params`.
        """

        self.api_key = api_key
        self.top_k = top_k
        self.allowed_domains = allowed_domains
        self.search_params = search_params or {}
        if "engine" not in self.search_params:
            self.search_params["engine"] = "google"

        # Ensure that the API key is resolved (fails fast if the env var is missing).
        _ = self.api_key.resolve_value()

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        return default_to_dict(
            self,
            top_k=self.top_k,
            allowed_domains=self.allowed_domains,
            search_params=self.search_params,
            api_key=self.api_key.to_dict(),
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SearchApiWebSearch":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.
        """
        deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
        return default_from_dict(cls, data)

    @component.output_types(documents=List[Document], links=List[str])
    def run(self, query: str) -> Dict[str, Union[List[Document], List[str]]]:
        """
        Uses [SearchApi](https://www.searchapi.io/) to search the web.

        :param query: Search query.
        :returns: A dictionary with the following keys:
            - "documents": List of documents returned by the search engine.
            - "links": List of links returned by the search engine.
        :raises TimeoutError: If the request to the SearchApi API times out.
        :raises SearchApiError: If an error occurs while querying the SearchApi API.
        """
        # Restrict the search to the allowed domains via Google's "site:" operator.
        query_prepend = "OR ".join(f"site:{domain} " for domain in self.allowed_domains) if self.allowed_domains else ""
        payload = {"q": query_prepend + " " + query, **self.search_params}
        headers = {"Authorization": f"Bearer {self.api_key.resolve_value()}", "X-SearchApi-Source": "Haystack"}
        try:
            response = requests.get(SEARCHAPI_BASE_URL, headers=headers, params=payload, timeout=90)
            response.raise_for_status()  # Will raise an HTTPError for bad responses
        except requests.Timeout as error:
            raise TimeoutError(f"Request to {self.__class__.__name__} timed out.") from error

        except requests.RequestException as e:
            raise SearchApiError(f"An error occurred while querying {self.__class__.__name__}. Error: {e}") from e

        # Request succeeded
        json_result = response.json()

        # organic results are the main results from the search engine
        organic_results = []
        if "organic_results" in json_result:
            for result in json_result["organic_results"]:
                organic_results.append(
                    Document.from_dict({"title": result["title"], "content": result["snippet"], "link": result["link"]})
                )

        # answer box has a direct answer to the query
        answer_box = []
        if "answer_box" in json_result:
            answer_box = [
                Document.from_dict(
                    {
                        "title": json_result["answer_box"].get("title", ""),
                        "content": json_result["answer_box"].get("answer", ""),
                        "link": json_result["answer_box"].get("link", ""),
                    }
                )
            ]

        knowledge_graph = []
        if "knowledge_graph" in json_result:
            knowledge_graph = [
                Document.from_dict(
                    {
                        "title": json_result["knowledge_graph"].get("title", ""),
                        "content": json_result["knowledge_graph"].get("description", ""),
                    }
                )
            ]

        related_questions = []
        if "related_questions" in json_result:
            for result in json_result["related_questions"]:
                related_questions.append(
                    Document.from_dict(
                        {
                            "title": result["question"],
                            "content": result["answer"] if result.get("answer") else result.get("answer_highlight", ""),
                            "link": result.get("source", {}).get("link", ""),
                        }
                    )
                )

        documents = answer_box + knowledge_graph + organic_results + related_questions

        # Bug fix: the response may contain no "organic_results" key at all (e.g. an
        # answer-box-only response) — indexing it directly raised KeyError.
        links = [result["link"] for result in json_result.get("organic_results", [])]

        logger.debug(
            "SearchApi returned {number_documents} documents for the query '{query}'",
            number_documents=len(documents),
            query=query,
        )
        return {"documents": documents[: self.top_k], "links": links[: self.top_k]}
|
testbed/deepset-ai__haystack/haystack/components/writers/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.components.writers.document_writer import DocumentWriter
|
| 6 |
+
|
| 7 |
+
__all__ = ["DocumentWriter"]
|
testbed/deepset-ai__haystack/haystack/components/writers/document_writer.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from haystack import Document, component, default_from_dict, default_to_dict, logging
|
| 8 |
+
from haystack.document_stores.types import DocumentStore, DuplicatePolicy
|
| 9 |
+
from haystack.utils import deserialize_document_store_in_init_params_inplace
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@component
class DocumentWriter:
    """
    Writes documents to a DocumentStore.

    ### Usage example
    ```python
    from haystack import Document
    from haystack.components.writers import DocumentWriter
    from haystack.document_stores.in_memory import InMemoryDocumentStore

    docs = [
        Document(content="Python is a popular programming language"),
    ]
    doc_store = InMemoryDocumentStore()
    writer = DocumentWriter(document_store=doc_store)
    writer.run(documents=docs)
    ```
    """

    def __init__(self, document_store: DocumentStore, policy: DuplicatePolicy = DuplicatePolicy.NONE):
        """
        Create a DocumentWriter component.

        :param document_store:
            The instance of the document store where you want to store your documents.
        :param policy:
            The policy to apply when a Document with the same ID already exists in the DocumentStore.
            - `DuplicatePolicy.NONE`: Default policy, relies on the DocumentStore settings.
            - `DuplicatePolicy.SKIP`: Skips documents with the same ID and doesn't write them to the DocumentStore.
            - `DuplicatePolicy.OVERWRITE`: Overwrites documents with the same ID.
            - `DuplicatePolicy.FAIL`: Raises an error if a Document with the same ID is already in the DocumentStore.
        """
        self.document_store = document_store
        self.policy = policy

    def _get_telemetry_data(self) -> Dict[str, Any]:
        """
        Data that is sent to Posthog for usage analytics.
        """
        # Only the store's class name is reported, never its contents.
        return {"document_store": type(self.document_store).__name__}

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes the component to a dictionary.

        :returns:
            Dictionary with serialized data.
        """
        # The policy is stored by enum member name (e.g. "NONE") so the result stays JSON-serializable.
        return default_to_dict(self, document_store=self.document_store.to_dict(), policy=self.policy.name)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "DocumentWriter":
        """
        Deserializes the component from a dictionary.

        :param data:
            The dictionary to deserialize from.
        :returns:
            The deserialized component.

        :raises DeserializationError:
            If the document store is not properly specified in the serialization data or its type cannot be imported.
        """
        # deserialize the document store
        deserialize_document_store_in_init_params_inplace(data)

        # Convert the policy name written by `to_dict` back into a DuplicatePolicy member.
        data["init_parameters"]["policy"] = DuplicatePolicy[data["init_parameters"]["policy"]]

        return default_from_dict(cls, data)

    @component.output_types(documents_written=int)
    def run(self, documents: List[Document], policy: Optional[DuplicatePolicy] = None):
        """
        Run the DocumentWriter on the given input data.

        :param documents:
            A list of documents to write to the document store.
        :param policy:
            The policy to use when encountering duplicate documents.
            When omitted, the policy given at construction time is used.
        :returns:
            Number of documents written to the document store.

        :raises ValueError:
            If the specified document store is not found.
        """
        # Per-call policy overrides the one configured at init time; fall back otherwise.
        if policy is None:
            policy = self.policy

        documents_written = self.document_store.write_documents(documents=documents, policy=policy)
        return {"documents_written": documents_written}
|
testbed/deepset-ai__haystack/haystack/core/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
testbed/deepset-ai__haystack/haystack/core/component/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from haystack.core.component.component import Component, component
|
| 6 |
+
from haystack.core.component.types import InputSocket, OutputSocket
|
| 7 |
+
|
| 8 |
+
__all__ = ["component", "Component", "InputSocket", "OutputSocket"]
|
testbed/deepset-ai__haystack/haystack/core/component/component.py
ADDED
|
@@ -0,0 +1,560 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
Attributes:
|
| 7 |
+
|
| 8 |
+
component: Marks a class as a component. Any class decorated with `@component` can be used by a Pipeline.
|
| 9 |
+
|
| 10 |
+
All components must follow the contract below. This docstring is the source of truth for components contract.
|
| 11 |
+
|
| 12 |
+
<hr>
|
| 13 |
+
|
| 14 |
+
`@component` decorator
|
| 15 |
+
|
| 16 |
+
All component classes must be decorated with the `@component` decorator. This allows Canals to discover them.
|
| 17 |
+
|
| 18 |
+
<hr>
|
| 19 |
+
|
| 20 |
+
`__init__(self, **kwargs)`
|
| 21 |
+
|
| 22 |
+
Optional method.
|
| 23 |
+
|
| 24 |
+
Components may have an `__init__` method where they define:
|
| 25 |
+
|
| 26 |
+
- `self.init_parameters = {same parameters that the __init__ method received}`:
|
| 27 |
+
In this dictionary you can store any state the components wish to be persisted when they are saved.
|
| 28 |
+
These values will be given to the `__init__` method of a new instance when the pipeline is loaded.
|
| 29 |
+
Note that by default the `@component` decorator saves the arguments automatically.
|
| 30 |
+
However, if a component sets their own `init_parameters` manually in `__init__()`, that will be used instead.
|
| 31 |
+
Note: all of the values contained here **must be JSON serializable**. Serialize them manually if needed.
|
| 32 |
+
|
| 33 |
+
Components should take only "basic" Python types as parameters of their `__init__` function, or iterables and
|
| 34 |
+
dictionaries containing only such values. Anything else (objects, functions, etc) will raise an exception at init
|
| 35 |
+
time. If there's the need for such values, consider serializing them to a string.
|
| 36 |
+
|
| 37 |
+
_(TODO explain how to use classes and functions in init. In the meantime see `test/components/test_accumulate.py`)_
|
| 38 |
+
|
| 39 |
+
The `__init__` must be extremely lightweight, because it's a frequent operation during the construction and
|
| 40 |
+
validation of the pipeline. If a component has some heavy state to initialize (models, backends, etc...) refer to
|
| 41 |
+
the `warm_up()` method.
|
| 42 |
+
|
| 43 |
+
<hr>
|
| 44 |
+
|
| 45 |
+
`warm_up(self)`
|
| 46 |
+
|
| 47 |
+
Optional method.
|
| 48 |
+
|
| 49 |
+
This method is called by Pipeline before the graph execution. Make sure to avoid double-initializations,
|
| 50 |
+
because Pipeline will not keep track of which components it called `warm_up()` on.
|
| 51 |
+
|
| 52 |
+
<hr>
|
| 53 |
+
|
| 54 |
+
`run(self, data)`
|
| 55 |
+
|
| 56 |
+
Mandatory method.
|
| 57 |
+
|
| 58 |
+
This is the method where the main functionality of the component should be carried out. It's called by
|
| 59 |
+
`Pipeline.run()`.
|
| 60 |
+
|
| 61 |
+
When the component should run, Pipeline will call this method with an instance of the dataclass returned by the
|
| 62 |
+
method decorated with `@component.input`. This dataclass contains:
|
| 63 |
+
|
| 64 |
+
- all the input values coming from other components connected to it,
|
| 65 |
+
- if any is missing, the corresponding value defined in `self.defaults`, if it exists.
|
| 66 |
+
|
| 67 |
+
`run()` must return a single instance of the dataclass declared through the method decorated with
|
| 68 |
+
`@component.output`.
|
| 69 |
+
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
import inspect
|
| 73 |
+
import sys
|
| 74 |
+
import warnings
|
| 75 |
+
from collections.abc import Callable
|
| 76 |
+
from contextlib import contextmanager
|
| 77 |
+
from contextvars import ContextVar
|
| 78 |
+
from copy import deepcopy
|
| 79 |
+
from dataclasses import dataclass
|
| 80 |
+
from types import new_class
|
| 81 |
+
from typing import Any, Dict, Optional, Protocol, Type, runtime_checkable
|
| 82 |
+
|
| 83 |
+
from haystack import logging
|
| 84 |
+
from haystack.core.errors import ComponentError
|
| 85 |
+
|
| 86 |
+
from .sockets import Sockets
|
| 87 |
+
from .types import InputSocket, OutputSocket, _empty
|
| 88 |
+
|
| 89 |
+
logger = logging.getLogger(__name__)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
@dataclass
class PreInitHookPayload:
    """
    State carried by the component pre-init hook installed via `_hook_component_init`.

    :param callback:
        Function invoked before a component's constructor runs; it receives the
        component class and the init parameters as keyword arguments.
    :param in_progress:
        True while the callback is executing. Guards against recursive invocation
        when a component's constructor itself instantiates another component.
    """

    callback: Callable
    in_progress: bool = False
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
_COMPONENT_PRE_INIT_HOOK: ContextVar[Optional[PreInitHookPayload]] = ContextVar("component_pre_init_hook", default=None)


@contextmanager
def _hook_component_init(callback: Callable):
    """
    Temporarily install `callback` as the component pre-init hook.

    While the context is active, every component instantiation first passes its
    class and init keyword arguments to `callback`, which may modify the kwargs
    in place. The previous hook (if any) is restored when the context exits.

    :param callback:
        Callback function to invoke.
    """
    reset_token = _COMPONENT_PRE_INIT_HOOK.set(PreInitHookPayload(callback))
    try:
        yield
    finally:
        _COMPONENT_PRE_INIT_HOOK.reset(reset_token)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@runtime_checkable
class Component(Protocol):
    """
    Structural type used by type checkers and static analysis tools.

    A class satisfies this protocol simply by exposing a `run` attribute. The
    method's signature and return value are deliberately left unchecked, because
    every component declares a different set of parameters; both

        def run(self, param: str) -> Dict[str, Any]: ...

    and

        def run(self, **kwargs): ...

    are accepted. This makes the static check weak on purpose — stricter
    validation of real components happens elsewhere.

    Being runtime checkable, the protocol also supports:

        isinstance(MyComponent, Component)
    """

    # Declaring `run` as an attribute (rather than defining a method) is the only
    # way to describe it here: a concrete method body would pin down a single
    # signature that no two components share. Language servers and type checkers
    # accept this form and report fewer spurious errors.
    # NOTE: This check can be removed when we drop Python 3.8 support.
    if sys.version_info >= (3, 9):
        run: Callable[..., Dict[str, Any]]
    else:
        run: Callable
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
class ComponentMeta(type):
    """
    Metaclass attached to every class re-created by the `@component` decorator.

    It intercepts instance creation (`__call__`) to run the optional pre-init
    hook and to populate the input/output sockets on the freshly built instance.
    """

    @staticmethod
    def _positional_to_kwargs(cls_type, args) -> Dict[str, Any]:
        """
        Convert positional arguments to keyword arguments based on the signature of the `__init__` method.
        """
        init_signature = inspect.signature(cls_type.__init__)
        init_params = {name: info for name, info in init_signature.parameters.items() if name != "self"}

        out = {}
        # zip pairs each positional arg with the parameter in declaration order.
        for arg, (name, info) in zip(args, init_params.items()):
            if info.kind == inspect.Parameter.VAR_POSITIONAL:
                raise ComponentError(
                    "Pre-init hooks do not support components with variadic positional args in their init method"
                )

            # Only plain positional parameters can be mapped onto keyword args safely.
            assert info.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY)
            out[name] = arg
        return out

    @staticmethod
    def _parse_and_set_output_sockets(instance: Any):
        """
        Populate `__haystack_output__` on the instance from the `@component.output_types` cache.
        """
        has_async_run = hasattr(instance, "run_async")

        # If `component.set_output_types()` was called in the component constructor,
        # `__haystack_output__` is already populated, no need to do anything.
        if not hasattr(instance, "__haystack_output__"):
            # If that's not the case, we need to populate `__haystack_output__`
            #
            # If either of the run methods were decorated, they'll have a field assigned that
            # stores the output specification. If both run methods were decorated, we ensure that
            # outputs are the same. We deepcopy the content of the cache to transfer ownership from
            # the class method to the actual instance, so that different instances of the same class
            # won't share this data.

            run_output_types = getattr(instance.run, "_output_types_cache", {})
            async_run_output_types = getattr(instance.run_async, "_output_types_cache", {}) if has_async_run else {}

            if has_async_run and run_output_types != async_run_output_types:
                raise ComponentError("Output type specifications of 'run' and 'run_async' methods must be the same")
            output_types_cache = run_output_types

            instance.__haystack_output__ = Sockets(instance, deepcopy(output_types_cache), OutputSocket)

    @staticmethod
    def _parse_and_set_input_sockets(component_cls: Type, instance: Any):
        """
        Populate `__haystack_input__` on the instance from the signature of `run`.

        Also validates that `run` and `run_async` (when present) declare identical parameters.
        """

        def inner(method, sockets):
            # Build one InputSocket per named parameter of `method`, skipping
            # `self` and variadics, and accumulate them into `sockets`.
            from inspect import Parameter

            run_signature = inspect.signature(method)

            for param_name, param_info in run_signature.parameters.items():
                if param_name == "self" or param_info.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
                    continue

                socket_kwargs = {"name": param_name, "type": param_info.annotation}
                if param_info.default != Parameter.empty:
                    socket_kwargs["default_value"] = param_info.default

                new_socket = InputSocket(**socket_kwargs)

                # Also ensure that new sockets don't override existing ones.
                existing_socket = sockets.get(param_name)
                if existing_socket is not None and existing_socket != new_socket:
                    raise ComponentError(
                        "set_input_types()/set_input_type() cannot override the parameters of the 'run' method"
                    )

                sockets[param_name] = new_socket

            return run_signature

        # Create the sockets if set_input_types() wasn't called in the constructor.
        if not hasattr(instance, "__haystack_input__"):
            instance.__haystack_input__ = Sockets(instance, {}, InputSocket)

        inner(getattr(component_cls, "run"), instance.__haystack_input__)

        # Ensure that the sockets are the same for the async method, if it exists.
        async_run = getattr(component_cls, "run_async", None)
        if async_run is not None:
            run_sockets = Sockets(instance, {}, InputSocket)
            async_run_sockets = Sockets(instance, {}, InputSocket)

            # Can't use the sockets from above as they might contain
            # values set with set_input_types().
            run_sig = inner(getattr(component_cls, "run"), run_sockets)
            async_run_sig = inner(async_run, async_run_sockets)

            if async_run_sockets != run_sockets or run_sig != async_run_sig:
                raise ComponentError("Parameters of 'run' and 'run_async' methods must be the same")

    def __call__(cls, *args, **kwargs):
        """
        This method is called when clients instantiate a Component and runs before __new__ and __init__.
        """
        # This will call __new__ then __init__, giving us back the Component instance
        pre_init_hook = _COMPONENT_PRE_INIT_HOOK.get()
        if pre_init_hook is None or pre_init_hook.in_progress:
            instance = super().__call__(*args, **kwargs)
        else:
            try:
                # `in_progress` prevents the hook from firing again if the
                # constructor below instantiates further components.
                pre_init_hook.in_progress = True
                named_positional_args = ComponentMeta._positional_to_kwargs(cls, args)
                assert (
                    set(named_positional_args.keys()).intersection(kwargs.keys()) == set()
                ), "positional and keyword arguments overlap"
                kwargs.update(named_positional_args)
                # The hook may mutate `kwargs` in place before construction.
                pre_init_hook.callback(cls, kwargs)
                instance = super().__call__(**kwargs)
            finally:
                pre_init_hook.in_progress = False

        # Before returning, we have the chance to modify the newly created
        # Component instance, so we take the chance and set up the I/O sockets
        has_async_run = hasattr(instance, "run_async")
        if has_async_run and not inspect.iscoroutinefunction(instance.run_async):
            raise ComponentError(f"Method 'run_async' of component '{cls.__name__}' must be a coroutine")
        instance.__haystack_supports_async__ = has_async_run

        ComponentMeta._parse_and_set_input_sockets(cls, instance)
        ComponentMeta._parse_and_set_output_sockets(instance)

        # Since a Component can't be used in multiple Pipelines at the same time
        # we need to know if it's already owned by a Pipeline when adding it to one.
        # We use this flag to check that.
        instance.__haystack_added_to_pipeline__ = None

        return instance
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def _component_repr(component: Component) -> str:
    """
    Default `__repr__` installed on every class decorated with `@component`.

    Shows the object's default repr, the pipeline-assigned component name when
    the component belongs to a Pipeline, and the input/output sockets.
    """
    base = object.__repr__(component)
    pipeline = getattr(component, "__haystack_added_to_pipeline__", None)
    if pipeline:
        # This Component has been added in a Pipeline, let's get the name from there.
        base += f"\n{pipeline.get_component_name(component)}"

    # We're explicitly ignoring the type here because we're sure that the component
    # has the __haystack_input__ and __haystack_output__ attributes at this point
    input_part = getattr(component, "__haystack_input__", "<invalid_input_sockets>")
    output_part = getattr(component, "__haystack_output__", "<invalid_output_sockets>")
    return f"{base}\n{input_part}\n{output_part}"
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def _component_run_has_kwargs(component_cls: Type) -> bool:
|
| 319 |
+
run_method = getattr(component_cls, "run", None)
|
| 320 |
+
if run_method is None:
|
| 321 |
+
return False
|
| 322 |
+
else:
|
| 323 |
+
return any(
|
| 324 |
+
param.kind == inspect.Parameter.VAR_KEYWORD for param in inspect.signature(run_method).parameters.values()
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
class _Component:
    """
    See module's docstring.

    Args:
        class_: the class that Canals should use as a component.
        serializable: whether to check, at init time, if the component can be saved with
            `save_pipelines()`.

    Returns:
        A class that can be recognized as a component.

    Raises:
        ComponentError: if the class provided has no `run()` method or otherwise doesn't respect the component contract.
    """

    def __init__(self):
        # Maps "module.ClassName" paths to registered component classes,
        # used for deserialization.
        self.registry = {}

    def set_input_type(
        self,
        instance,
        name: str,
        type: Any,  # noqa: A002
        default: Any = _empty,
    ):
        """
        Add a single input socket to the component instance.

        Replaces any existing input socket with the same name.

        :param instance: Component instance where the input type will be added.
        :param name: name of the input socket.
        :param type: type of the input socket.
        :param default: default value of the input socket, defaults to _empty
        """
        if not _component_run_has_kwargs(instance.__class__):
            raise ComponentError(
                "Cannot set input types on a component that doesn't have a kwargs parameter in the 'run' method"
            )

        if not hasattr(instance, "__haystack_input__"):
            instance.__haystack_input__ = Sockets(instance, {}, InputSocket)
        instance.__haystack_input__[name] = InputSocket(name=name, type=type, default_value=default)

    def set_input_types(self, instance, **types):
        """
        Method that specifies the input types when 'kwargs' is passed to the run method.

        Use as:

        ```python
        @component
        class MyComponent:

            def __init__(self, value: int):
                component.set_input_types(self, value_1=str, value_2=str)
                ...

            @component.output_types(output_1=int, output_2=str)
            def run(self, **kwargs):
                return {"output_1": kwargs["value_1"], "output_2": ""}
        ```

        Note that if the `run()` method also specifies some parameters, those will take precedence.

        For example:

        ```python
        @component
        class MyComponent:

            def __init__(self, value: int):
                component.set_input_types(self, value_1=str, value_2=str)
                ...

            @component.output_types(output_1=int, output_2=str)
            def run(self, value_0: str, value_1: Optional[str] = None, **kwargs):
                return {"output_1": kwargs["value_1"], "output_2": ""}
        ```

        would add a mandatory `value_0` parameters, make the `value_1`
        parameter optional with a default None, and keep the `value_2`
        parameter mandatory as specified in `set_input_types`.

        """
        if not _component_run_has_kwargs(instance.__class__):
            raise ComponentError(
                "Cannot set input types on a component that doesn't have a kwargs parameter in the 'run' method"
            )

        # NOTE: this replaces the whole socket mapping, unlike set_input_type
        # which adds/replaces a single entry.
        instance.__haystack_input__ = Sockets(
            instance, {name: InputSocket(name=name, type=type_) for name, type_ in types.items()}, InputSocket
        )

    def set_output_types(self, instance, **types):
        """
        Method that specifies the output types when the 'run' method is not decorated with 'component.output_types'.

        Use as:

        ```python
        @component
        class MyComponent:

            def __init__(self, value: int):
                component.set_output_types(self, output_1=int, output_2=str)
                ...

            # no decorators here
            def run(self, value: int):
                return {"output_1": 1, "output_2": "2"}
        ```
        """
        # The decorator and this method are mutually exclusive ways to declare outputs.
        has_decorator = hasattr(instance.run, "_output_types_cache")
        if has_decorator:
            raise ComponentError(
                "Cannot call `set_output_types` on a component that already has "
                "the 'output_types' decorator on its `run` method"
            )

        instance.__haystack_output__ = Sockets(
            instance, {name: OutputSocket(name=name, type=type_) for name, type_ in types.items()}, OutputSocket
        )

    def output_types(self, **types):
        """
        Decorator factory that specifies the output types of a component.

        Use as:

        ```python
        @component
        class MyComponent:
            @component.output_types(output_1=int, output_2=str)
            def run(self, value: int):
                return {"output_1": 1, "output_2": "2"}
        ```
        """

        def output_types_decorator(run_method):
            """
            Decorator that sets the output types of the decorated method.

            This happens at class creation time, and since we don't have the decorated
            class available here, we temporarily store the output types as an attribute of
            the decorated method. The ComponentMeta metaclass will use this data to create
            sockets at instance creation time.
            """
            method_name = run_method.__name__
            if method_name not in ("run", "run_async"):
                raise ComponentError("'output_types' decorator can only be used on 'run' and 'run_async' methods")

            setattr(
                run_method,
                "_output_types_cache",
                {name: OutputSocket(name=name, type=type_) for name, type_ in types.items()},
            )
            return run_method

        return output_types_decorator

    def _component(self, cls, is_greedy: Optional[bool] = None):
        """
        Decorator validating the structure of the component and registering it in the components registry.
        """
        logger.debug("Registering {component} as a component", component=cls)

        if is_greedy is not None:
            msg = (
                "The 'is_greedy' argument is deprecated and will be removed in version '2.7.0'. "
                "Change the 'Variadic' input of your Component to 'GreedyVariadic' instead."
            )
            warnings.warn(msg, DeprecationWarning)
        else:
            is_greedy = False

        # Check for required methods and fail as soon as possible
        if not hasattr(cls, "run"):
            raise ComponentError(f"{cls.__name__} must have a 'run()' method. See the docs for more information.")

        def copy_class_namespace(namespace):
            """
            This is the callback that `typing.new_class` will use to populate the newly created class.

            Simply copy the whole namespace from the decorated class.
            """
            for key, val in dict(cls.__dict__).items():
                # __dict__ and __weakref__ are class-bound, we should let Python recreate them.
                if key in ("__dict__", "__weakref__"):
                    continue
                namespace[key] = val

        # Recreate the decorated component class so it uses our metaclass.
        # We must explicitly redefine the type of the class to make sure language servers
        # and type checkers understand that the class is of the correct type.
        # mypy doesn't like that we do this though so we explicitly ignore the type check.
        cls: cls.__name__ = new_class(cls.__name__, cls.__bases__, {"metaclass": ComponentMeta}, copy_class_namespace)  # type: ignore[no-redef]

        # Save the component in the class registry (for deserialization)
        class_path = f"{cls.__module__}.{cls.__name__}"
        if class_path in self.registry:
            # Corner case, but it may occur easily in notebooks when re-running cells.
            logger.debug(
                "Component {component} is already registered. Previous imported from '{module_name}', \
new imported from '{new_module_name}'",
                component=class_path,
                module_name=self.registry[class_path],
                new_module_name=cls,
            )
        self.registry[class_path] = cls
        logger.debug("Registered Component {component}", component=cls)

        # Override the __repr__ method with a default one
        cls.__repr__ = _component_repr

        return cls

    def __call__(self, cls: Optional[type] = None, is_greedy: Optional[bool] = None):
        """
        Entry point of the `@component` decorator, usable with or without parentheses.
        """
        # We must wrap the call to the decorator in a function for it to work
        # correctly with or without parens
        def wrap(cls):
            return self._component(cls, is_greedy=is_greedy)

        if cls:
            # Decorator is called without parens
            return wrap(cls)

        # Decorator is called with parens
        return wrap


# Singleton decorator instance used throughout the codebase as `@component`.
component = _Component()
|
testbed/deepset-ai__haystack/haystack/core/component/sockets.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Dict, Optional, Type, Union
|
| 6 |
+
|
| 7 |
+
from haystack import logging
|
| 8 |
+
from haystack.core.type_utils import _type_name
|
| 9 |
+
|
| 10 |
+
from .types import InputSocket, OutputSocket
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
SocketsDict = Dict[str, Union[InputSocket, OutputSocket]]
|
| 15 |
+
SocketsIOType = Union[Type[InputSocket], Type[OutputSocket]]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Sockets:
|
| 19 |
+
"""
|
| 20 |
+
Represents the inputs or outputs of a `Component`.
|
| 21 |
+
|
| 22 |
+
Depending on the type passed to the constructor, it will represent either the inputs or the outputs of
|
| 23 |
+
the `Component`.
|
| 24 |
+
|
| 25 |
+
Usage:
|
| 26 |
+
```python
|
| 27 |
+
from typing import Any
|
| 28 |
+
from haystack.components.builders.prompt_builder import PromptBuilder
|
| 29 |
+
from haystack.core.component.sockets import Sockets
|
| 30 |
+
from haystack.core.component.types import InputSocket, OutputSocket
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
prompt_template = \"""
|
| 34 |
+
Given these documents, answer the question.\nDocuments:
|
| 35 |
+
{% for doc in documents %}
|
| 36 |
+
{{ doc.content }}
|
| 37 |
+
{% endfor %}
|
| 38 |
+
|
| 39 |
+
\nQuestion: {{question}}
|
| 40 |
+
\nAnswer:
|
| 41 |
+
\"""
|
| 42 |
+
|
| 43 |
+
prompt_builder = PromptBuilder(template=prompt_template)
|
| 44 |
+
sockets = {"question": InputSocket("question", Any), "documents": InputSocket("documents", Any)}
|
| 45 |
+
inputs = Sockets(component=prompt_builder, sockets_dict=sockets, sockets_io_type=InputSocket)
|
| 46 |
+
inputs
|
| 47 |
+
>>> Inputs:
|
| 48 |
+
>>> - question: Any
|
| 49 |
+
>>> - documents: Any
|
| 50 |
+
|
| 51 |
+
inputs.question
|
| 52 |
+
>>> InputSocket(name='question', type=typing.Any, default_value=<class 'haystack.core.component.types._empty'>, ...
|
| 53 |
+
```
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
# We're using a forward declaration here to avoid a circular import.
|
| 57 |
+
def __init__(
|
| 58 |
+
self,
|
| 59 |
+
component: "Component", # type: ignore[name-defined] # noqa: F821
|
| 60 |
+
sockets_dict: SocketsDict,
|
| 61 |
+
sockets_io_type: SocketsIOType,
|
| 62 |
+
):
|
| 63 |
+
"""
|
| 64 |
+
Create a new Sockets object.
|
| 65 |
+
|
| 66 |
+
We don't do any enforcement on the types of the sockets here, the `sockets_type` is only used for
|
| 67 |
+
the `__repr__` method.
|
| 68 |
+
We could do without it and use the type of a random value in the `sockets` dict, but that wouldn't
|
| 69 |
+
work for components that have no sockets at all. Either input or output.
|
| 70 |
+
|
| 71 |
+
:param component:
|
| 72 |
+
The component that these sockets belong to.
|
| 73 |
+
:param sockets_dict:
|
| 74 |
+
A dictionary of sockets.
|
| 75 |
+
:param sockets_io_type:
|
| 76 |
+
The type of the sockets.
|
| 77 |
+
"""
|
| 78 |
+
self._sockets_io_type = sockets_io_type
|
| 79 |
+
self._component = component
|
| 80 |
+
self._sockets_dict = sockets_dict
|
| 81 |
+
self.__dict__.update(sockets_dict)
|
| 82 |
+
|
| 83 |
+
def __eq__(self, value: object) -> bool:
|
| 84 |
+
if not isinstance(value, Sockets):
|
| 85 |
+
return False
|
| 86 |
+
|
| 87 |
+
return (
|
| 88 |
+
self._sockets_io_type == value._sockets_io_type
|
| 89 |
+
and self._component == value._component
|
| 90 |
+
and self._sockets_dict == value._sockets_dict
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
def __setitem__(self, key: str, socket: Union[InputSocket, OutputSocket]):
|
| 94 |
+
"""
|
| 95 |
+
Adds a new socket to this Sockets object.
|
| 96 |
+
|
| 97 |
+
This eases a bit updating the list of sockets after Sockets has been created.
|
| 98 |
+
That should happen only in the `component` decorator.
|
| 99 |
+
"""
|
| 100 |
+
self._sockets_dict[key] = socket
|
| 101 |
+
self.__dict__[key] = socket
|
| 102 |
+
|
| 103 |
+
def __contains__(self, key: str) -> bool:
|
| 104 |
+
return key in self._sockets_dict
|
| 105 |
+
|
| 106 |
+
def get(
|
| 107 |
+
self, key: str, default: Optional[Union[InputSocket, OutputSocket]] = None
|
| 108 |
+
) -> Optional[Union[InputSocket, OutputSocket]]:
|
| 109 |
+
"""
|
| 110 |
+
Get a socket from the Sockets object.
|
| 111 |
+
|
| 112 |
+
:param key:
|
| 113 |
+
The name of the socket to get.
|
| 114 |
+
:param default:
|
| 115 |
+
The value to return if the key is not found.
|
| 116 |
+
:returns:
|
| 117 |
+
The socket with the given key or `default` if the key is not found.
|
| 118 |
+
"""
|
| 119 |
+
return self._sockets_dict.get(key, default)
|
| 120 |
+
|
| 121 |
+
def _component_name(self) -> str:
|
| 122 |
+
if pipeline := getattr(self._component, "__haystack_added_to_pipeline__"):
|
| 123 |
+
# This Component has been added in a Pipeline, let's get the name from there.
|
| 124 |
+
return pipeline.get_component_name(self._component)
|
| 125 |
+
|
| 126 |
+
# This Component has not been added to a Pipeline yet, so we can't know its name.
|
| 127 |
+
# Let's use default __repr__. We don't call repr() directly as Components have a custom
|
| 128 |
+
# __repr__ method and that would lead to infinite recursion since we call Sockets.__repr__ in it.
|
| 129 |
+
return object.__repr__(self._component)
|
| 130 |
+
|
| 131 |
+
def __getattribute__(self, name):
|
| 132 |
+
try:
|
| 133 |
+
sockets = object.__getattribute__(self, "_sockets")
|
| 134 |
+
if name in sockets:
|
| 135 |
+
return sockets[name]
|
| 136 |
+
except AttributeError:
|
| 137 |
+
pass
|
| 138 |
+
|
| 139 |
+
return object.__getattribute__(self, name)
|
| 140 |
+
|
| 141 |
+
def __repr__(self) -> str:
|
| 142 |
+
result = ""
|
| 143 |
+
if self._sockets_io_type == InputSocket:
|
| 144 |
+
result = "Inputs:\n"
|
| 145 |
+
elif self._sockets_io_type == OutputSocket:
|
| 146 |
+
result = "Outputs:\n"
|
| 147 |
+
|
| 148 |
+
return result + "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
|
testbed/deepset-ai__haystack/haystack/core/component/types.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from dataclasses import dataclass, field
|
| 6 |
+
from typing import Any, Iterable, List, Type, TypeVar, get_args
|
| 7 |
+
|
| 8 |
+
from typing_extensions import Annotated, TypeAlias # Python 3.8 compatibility
|
| 9 |
+
|
| 10 |
+
HAYSTACK_VARIADIC_ANNOTATION = "__haystack__variadic_t"
|
| 11 |
+
HAYSTACK_GREEDY_VARIADIC_ANNOTATION = "__haystack__greedy_variadic_t"
|
| 12 |
+
|
| 13 |
+
# # Generic type variable used in the Variadic container
|
| 14 |
+
T = TypeVar("T")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Variadic is a custom annotation type we use to mark input types.
|
| 18 |
+
# This type doesn't do anything else than "marking" the contained
|
| 19 |
+
# type so it can be used in the `InputSocket` creation where we
|
| 20 |
+
# check that its annotation equals to HAYSTACK_VARIADIC_ANNOTATION
|
| 21 |
+
Variadic: TypeAlias = Annotated[Iterable[T], HAYSTACK_VARIADIC_ANNOTATION]
|
| 22 |
+
|
| 23 |
+
# GreedyVariadic type is similar to Variadic.
|
| 24 |
+
# The only difference is the way it's treated by the Pipeline when input is received
|
| 25 |
+
# in a socket with this type.
|
| 26 |
+
# Instead of waiting for other inputs to be received, Components that have a GreedyVariadic
|
| 27 |
+
# input will be run right after receiving the first input.
|
| 28 |
+
# Even if there are multiple connections to that socket.
|
| 29 |
+
GreedyVariadic: TypeAlias = Annotated[Iterable[T], HAYSTACK_GREEDY_VARIADIC_ANNOTATION]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class _empty:
|
| 33 |
+
"""Custom object for marking InputSocket.default_value as not set."""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class InputSocket:
|
| 38 |
+
"""
|
| 39 |
+
Represents an input of a `Component`.
|
| 40 |
+
|
| 41 |
+
:param name:
|
| 42 |
+
The name of the input.
|
| 43 |
+
:param type:
|
| 44 |
+
The type of the input.
|
| 45 |
+
:param default_value:
|
| 46 |
+
The default value of the input. If not set, the input is mandatory.
|
| 47 |
+
:param is_variadic:
|
| 48 |
+
Whether the input is variadic or not.
|
| 49 |
+
:param is_greedy
|
| 50 |
+
Whether the input is a greedy variadic or not.
|
| 51 |
+
:param senders:
|
| 52 |
+
The list of components that send data to this input.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
name: str
|
| 56 |
+
type: Type
|
| 57 |
+
default_value: Any = _empty
|
| 58 |
+
is_variadic: bool = field(init=False)
|
| 59 |
+
is_greedy: bool = field(init=False)
|
| 60 |
+
senders: List[str] = field(default_factory=list)
|
| 61 |
+
|
| 62 |
+
@property
|
| 63 |
+
def is_mandatory(self):
|
| 64 |
+
"""Check if the input is mandatory."""
|
| 65 |
+
return self.default_value == _empty
|
| 66 |
+
|
| 67 |
+
def __post_init__(self):
|
| 68 |
+
try:
|
| 69 |
+
# __metadata__ is a tuple
|
| 70 |
+
self.is_variadic = self.type.__metadata__[0] in [
|
| 71 |
+
HAYSTACK_VARIADIC_ANNOTATION,
|
| 72 |
+
HAYSTACK_GREEDY_VARIADIC_ANNOTATION,
|
| 73 |
+
]
|
| 74 |
+
self.is_greedy = self.type.__metadata__[0] == HAYSTACK_GREEDY_VARIADIC_ANNOTATION
|
| 75 |
+
except AttributeError:
|
| 76 |
+
self.is_variadic = False
|
| 77 |
+
self.is_greedy = False
|
| 78 |
+
if self.is_variadic:
|
| 79 |
+
# We need to "unpack" the type inside the Variadic annotation,
|
| 80 |
+
# otherwise the pipeline connection api will try to match
|
| 81 |
+
# `Annotated[type, HAYSTACK_VARIADIC_ANNOTATION]`.
|
| 82 |
+
#
|
| 83 |
+
# Note1: Variadic is expressed as an annotation of one single type,
|
| 84 |
+
# so the return value of get_args will always be a one-item tuple.
|
| 85 |
+
#
|
| 86 |
+
# Note2: a pipeline always passes a list of items when a component
|
| 87 |
+
# input is declared as Variadic, so the type itself always wraps
|
| 88 |
+
# an iterable of the declared type. For example, Variadic[int]
|
| 89 |
+
# is eventually an alias for Iterable[int]. Since we're interested
|
| 90 |
+
# in getting the inner type `int`, we call `get_args` twice: the
|
| 91 |
+
# first time to get `List[int]` out of `Variadic`, the second time
|
| 92 |
+
# to get `int` out of `List[int]`.
|
| 93 |
+
self.type = get_args(get_args(self.type)[0])[0]
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@dataclass
|
| 97 |
+
class OutputSocket:
|
| 98 |
+
"""
|
| 99 |
+
Represents an output of a `Component`.
|
| 100 |
+
|
| 101 |
+
:param name:
|
| 102 |
+
The name of the output.
|
| 103 |
+
:param type:
|
| 104 |
+
The type of the output.
|
| 105 |
+
:param receivers:
|
| 106 |
+
The list of components that receive the output of this component.
|
| 107 |
+
"""
|
| 108 |
+
|
| 109 |
+
name: str
|
| 110 |
+
type: type
|
| 111 |
+
receivers: List[str] = field(default_factory=list)
|
testbed/deepset-ai__haystack/haystack/core/errors.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class PipelineError(Exception):
|
| 7 |
+
pass
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class PipelineRuntimeError(Exception):
|
| 11 |
+
pass
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class PipelineConnectError(PipelineError):
|
| 15 |
+
pass
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PipelineValidationError(PipelineError):
|
| 19 |
+
pass
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class PipelineDrawingError(PipelineError):
|
| 23 |
+
pass
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PipelineMaxComponentRuns(PipelineError):
|
| 27 |
+
pass
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class PipelineUnmarshalError(PipelineError):
|
| 31 |
+
pass
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ComponentError(Exception):
|
| 35 |
+
pass
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ComponentDeserializationError(Exception):
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class DeserializationError(Exception):
|
| 43 |
+
pass
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class SerializationError(Exception):
|
| 47 |
+
pass
|
testbed/deepset-ai__haystack/haystack/core/pipeline/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from .pipeline import Pipeline
|
| 6 |
+
from .template import PredefinedPipeline
|
| 7 |
+
|
| 8 |
+
__all__ = ["Pipeline", "PredefinedPipeline"]
|
testbed/deepset-ai__haystack/haystack/core/pipeline/base.py
ADDED
|
@@ -0,0 +1,1375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import importlib
|
| 6 |
+
import itertools
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
from copy import deepcopy
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Any, Dict, Iterator, List, Optional, Set, TextIO, Tuple, Type, TypeVar, Union
|
| 12 |
+
|
| 13 |
+
import networkx # type:ignore
|
| 14 |
+
|
| 15 |
+
from haystack import logging
|
| 16 |
+
from haystack.core.component import Component, InputSocket, OutputSocket, component
|
| 17 |
+
from haystack.core.errors import (
|
| 18 |
+
DeserializationError,
|
| 19 |
+
PipelineConnectError,
|
| 20 |
+
PipelineDrawingError,
|
| 21 |
+
PipelineError,
|
| 22 |
+
PipelineRuntimeError,
|
| 23 |
+
PipelineUnmarshalError,
|
| 24 |
+
PipelineValidationError,
|
| 25 |
+
)
|
| 26 |
+
from haystack.core.serialization import DeserializationCallbacks, component_from_dict, component_to_dict
|
| 27 |
+
from haystack.core.type_utils import _type_name, _types_are_compatible
|
| 28 |
+
from haystack.marshal import Marshaller, YamlMarshaller
|
| 29 |
+
from haystack.utils import is_in_jupyter
|
| 30 |
+
|
| 31 |
+
from .descriptions import find_pipeline_inputs, find_pipeline_outputs
|
| 32 |
+
from .draw import _to_mermaid_image
|
| 33 |
+
from .template import PipelineTemplate, PredefinedPipeline
|
| 34 |
+
from .utils import parse_connect_string
|
| 35 |
+
|
| 36 |
+
DEFAULT_MARSHALLER = YamlMarshaller()
|
| 37 |
+
|
| 38 |
+
# We use a generic type to annotate the return value of classmethods,
|
| 39 |
+
# so that static analyzers won't be confused when derived classes
|
| 40 |
+
# use those methods.
|
| 41 |
+
T = TypeVar("T", bound="PipelineBase")
|
| 42 |
+
|
| 43 |
+
logger = logging.getLogger(__name__)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class PipelineBase:
|
| 47 |
+
"""
|
| 48 |
+
Components orchestration engine.
|
| 49 |
+
|
| 50 |
+
Builds a graph of components and orchestrates their execution according to the execution graph.
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
def __init__(self, metadata: Optional[Dict[str, Any]] = None, max_runs_per_component: int = 100):
|
| 54 |
+
"""
|
| 55 |
+
Creates the Pipeline.
|
| 56 |
+
|
| 57 |
+
:param metadata:
|
| 58 |
+
Arbitrary dictionary to store metadata about this `Pipeline`. Make sure all the values contained in
|
| 59 |
+
this dictionary can be serialized and deserialized if you wish to save this `Pipeline` to file.
|
| 60 |
+
:param max_runs_per_component:
|
| 61 |
+
How many times the `Pipeline` can run the same Component.
|
| 62 |
+
If this limit is reached a `PipelineMaxComponentRuns` exception is raised.
|
| 63 |
+
If not set defaults to 100 runs per Component.
|
| 64 |
+
"""
|
| 65 |
+
self._telemetry_runs = 0
|
| 66 |
+
self._last_telemetry_sent: Optional[datetime] = None
|
| 67 |
+
self.metadata = metadata or {}
|
| 68 |
+
self.graph = networkx.MultiDiGraph()
|
| 69 |
+
self._max_runs_per_component = max_runs_per_component
|
| 70 |
+
|
| 71 |
+
def __eq__(self, other) -> bool:
|
| 72 |
+
"""
|
| 73 |
+
Pipeline equality is defined by their type and the equality of their serialized form.
|
| 74 |
+
|
| 75 |
+
Pipelines of the same type share every metadata, node and edge, but they're not required to use
|
| 76 |
+
the same node instances: this allows pipeline saved and then loaded back to be equal to themselves.
|
| 77 |
+
"""
|
| 78 |
+
if not isinstance(self, type(other)):
|
| 79 |
+
return False
|
| 80 |
+
return self.to_dict() == other.to_dict()
|
| 81 |
+
|
| 82 |
+
def __repr__(self) -> str:
|
| 83 |
+
"""
|
| 84 |
+
Returns a text representation of the Pipeline.
|
| 85 |
+
"""
|
| 86 |
+
res = f"{object.__repr__(self)}\n"
|
| 87 |
+
if self.metadata:
|
| 88 |
+
res += "🧱 Metadata\n"
|
| 89 |
+
for k, v in self.metadata.items():
|
| 90 |
+
res += f" - {k}: {v}\n"
|
| 91 |
+
|
| 92 |
+
res += "🚅 Components\n"
|
| 93 |
+
for name, instance in self.graph.nodes(data="instance"): # type: ignore # type wrongly defined in networkx
|
| 94 |
+
res += f" - {name}: {instance.__class__.__name__}\n"
|
| 95 |
+
|
| 96 |
+
res += "🛤️ Connections\n"
|
| 97 |
+
for sender, receiver, edge_data in self.graph.edges(data=True):
|
| 98 |
+
sender_socket = edge_data["from_socket"].name
|
| 99 |
+
receiver_socket = edge_data["to_socket"].name
|
| 100 |
+
res += f" - {sender}.{sender_socket} -> {receiver}.{receiver_socket} ({edge_data['conn_type']})\n"
|
| 101 |
+
|
| 102 |
+
return res
|
| 103 |
+
|
| 104 |
+
def to_dict(self) -> Dict[str, Any]:
|
| 105 |
+
"""
|
| 106 |
+
Serializes the pipeline to a dictionary.
|
| 107 |
+
|
| 108 |
+
This is meant to be an intermediate representation but it can be also used to save a pipeline to file.
|
| 109 |
+
|
| 110 |
+
:returns:
|
| 111 |
+
Dictionary with serialized data.
|
| 112 |
+
"""
|
| 113 |
+
components = {}
|
| 114 |
+
for name, instance in self.graph.nodes(data="instance"): # type:ignore
|
| 115 |
+
components[name] = component_to_dict(instance, name)
|
| 116 |
+
|
| 117 |
+
connections = []
|
| 118 |
+
for sender, receiver, edge_data in self.graph.edges.data():
|
| 119 |
+
sender_socket = edge_data["from_socket"].name
|
| 120 |
+
receiver_socket = edge_data["to_socket"].name
|
| 121 |
+
connections.append({"sender": f"{sender}.{sender_socket}", "receiver": f"{receiver}.{receiver_socket}"})
|
| 122 |
+
return {
|
| 123 |
+
"metadata": self.metadata,
|
| 124 |
+
"max_runs_per_component": self._max_runs_per_component,
|
| 125 |
+
"components": components,
|
| 126 |
+
"connections": connections,
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
@classmethod
|
| 130 |
+
def from_dict(
|
| 131 |
+
cls: Type[T], data: Dict[str, Any], callbacks: Optional[DeserializationCallbacks] = None, **kwargs
|
| 132 |
+
) -> T:
|
| 133 |
+
"""
|
| 134 |
+
Deserializes the pipeline from a dictionary.
|
| 135 |
+
|
| 136 |
+
:param data:
|
| 137 |
+
Dictionary to deserialize from.
|
| 138 |
+
:param callbacks:
|
| 139 |
+
Callbacks to invoke during deserialization.
|
| 140 |
+
:param kwargs:
|
| 141 |
+
`components`: a dictionary of {name: instance} to reuse instances of components instead of creating new
|
| 142 |
+
ones.
|
| 143 |
+
:returns:
|
| 144 |
+
Deserialized component.
|
| 145 |
+
"""
|
| 146 |
+
data_copy = deepcopy(data) # to prevent modification of original data
|
| 147 |
+
metadata = data_copy.get("metadata", {})
|
| 148 |
+
max_runs_per_component = data_copy.get("max_runs_per_component", 100)
|
| 149 |
+
pipe = cls(metadata=metadata, max_runs_per_component=max_runs_per_component)
|
| 150 |
+
components_to_reuse = kwargs.get("components", {})
|
| 151 |
+
for name, component_data in data_copy.get("components", {}).items():
|
| 152 |
+
if name in components_to_reuse:
|
| 153 |
+
# Reuse an instance
|
| 154 |
+
instance = components_to_reuse[name]
|
| 155 |
+
else:
|
| 156 |
+
if "type" not in component_data:
|
| 157 |
+
raise PipelineError(f"Missing 'type' in component '{name}'")
|
| 158 |
+
|
| 159 |
+
if component_data["type"] not in component.registry:
|
| 160 |
+
try:
|
| 161 |
+
# Import the module first...
|
| 162 |
+
module, _ = component_data["type"].rsplit(".", 1)
|
| 163 |
+
logger.debug("Trying to import module {module_name}", module_name=module)
|
| 164 |
+
importlib.import_module(module)
|
| 165 |
+
# ...then try again
|
| 166 |
+
if component_data["type"] not in component.registry:
|
| 167 |
+
raise PipelineError(
|
| 168 |
+
f"Successfully imported module {module} but can't find it in the component registry."
|
| 169 |
+
"This is unexpected and most likely a bug."
|
| 170 |
+
)
|
| 171 |
+
except (ImportError, PipelineError) as e:
|
| 172 |
+
raise PipelineError(f"Component '{component_data['type']}' not imported.") from e
|
| 173 |
+
|
| 174 |
+
# Create a new one
|
| 175 |
+
component_class = component.registry[component_data["type"]]
|
| 176 |
+
|
| 177 |
+
try:
|
| 178 |
+
instance = component_from_dict(component_class, component_data, name, callbacks)
|
| 179 |
+
except Exception as e:
|
| 180 |
+
msg = (
|
| 181 |
+
f"Couldn't deserialize component '{name}' of class '{component_class.__name__}' "
|
| 182 |
+
f"with the following data: {str(component_data)}. Possible reasons include "
|
| 183 |
+
"malformed serialized data, mismatch between the serialized component and the "
|
| 184 |
+
"loaded one (due to a breaking change, see "
|
| 185 |
+
"https://github.com/deepset-ai/haystack/releases), etc."
|
| 186 |
+
)
|
| 187 |
+
raise DeserializationError(msg) from e
|
| 188 |
+
pipe.add_component(name=name, instance=instance)
|
| 189 |
+
|
| 190 |
+
for connection in data.get("connections", []):
|
| 191 |
+
if "sender" not in connection:
|
| 192 |
+
raise PipelineError(f"Missing sender in connection: {connection}")
|
| 193 |
+
if "receiver" not in connection:
|
| 194 |
+
raise PipelineError(f"Missing receiver in connection: {connection}")
|
| 195 |
+
pipe.connect(sender=connection["sender"], receiver=connection["receiver"])
|
| 196 |
+
|
| 197 |
+
return pipe
|
| 198 |
+
|
| 199 |
+
def dumps(self, marshaller: Marshaller = DEFAULT_MARSHALLER) -> str:
    """
    Returns the string representation of this pipeline according to the format dictated by the `Marshaller` in use.

    :param marshaller:
        The Marshaller used to create the string representation. Defaults to `YamlMarshaller`.
    :returns:
        A string representing the pipeline.
    """
    # Serialize to a plain dict first, then let the marshaller turn it into text.
    serialized = self.to_dict()
    return marshaller.marshal(serialized)
def dump(self, fp: TextIO, marshaller: Marshaller = DEFAULT_MARSHALLER):
    """
    Writes the string representation of this pipeline to the file-like object passed in the `fp` argument.

    :param fp:
        A file-like object ready to be written to.
    :param marshaller:
        The Marshaller used to create the string representation. Defaults to `YamlMarshaller`.
    """
    # Marshal the serialized pipeline and stream it out in one write.
    serialized = marshaller.marshal(self.to_dict())
    fp.write(serialized)
@classmethod
def loads(
    cls: Type[T],
    data: Union[str, bytes, bytearray],
    marshaller: Marshaller = DEFAULT_MARSHALLER,
    callbacks: Optional[DeserializationCallbacks] = None,
) -> T:
    """
    Creates a `Pipeline` object from the string representation passed in the `data` argument.

    :param data:
        The string representation of the pipeline, can be `str`, `bytes` or `bytearray`.
    :param marshaller:
        The Marshaller used to create the string representation. Defaults to `YamlMarshaller`.
    :param callbacks:
        Callbacks to invoke during deserialization.
    :raises DeserializationError:
        If an error occurs during deserialization.
    :returns:
        A `Pipeline` object.
    """
    try:
        unmarshalled = marshaller.unmarshal(data)
    except Exception as e:
        # Re-raise parsing problems under a single, well-known error type while
        # preserving the original cause for debugging.
        raise DeserializationError(
            "Error while unmarshalling serialized pipeline data. This is usually "
            "caused by malformed or invalid syntax in the serialized representation."
        ) from e

    return cls.from_dict(unmarshalled, callbacks)
@classmethod
def load(
    cls: Type[T],
    fp: TextIO,
    marshaller: Marshaller = DEFAULT_MARSHALLER,
    callbacks: Optional[DeserializationCallbacks] = None,
) -> T:
    """
    Creates a `Pipeline` object a string representation.

    The string representation is read from the file-like object passed in the `fp` argument.


    :param fp:
        A file-like object ready to be read from.
    :param marshaller:
        The Marshaller used to create the string representation. Defaults to `YamlMarshaller`.
    :param callbacks:
        Callbacks to invoke during deserialization.
    :raises DeserializationError:
        If an error occurs during deserialization.
    :returns:
        A `Pipeline` object.
    """
    # Consume the whole stream, then delegate the actual parsing to `loads`.
    content = fp.read()
    return cls.loads(content, marshaller, callbacks)
def add_component(self, name: str, instance: Component) -> None:
    """
    Add the given component to the pipeline.

    Components are not connected to anything by default: use `Pipeline.connect()` to connect components together.
    Component names must be unique, but component instances can be reused if needed.

    :param name:
        The name of the component to add.
    :param instance:
        The component instance to add.

    :raises ValueError:
        If a component with the same name already exists.
    :raises PipelineValidationError:
        If the given instance is not a Canals component.
    """
    # Names key the graph nodes, so duplicates are rejected outright.
    if name in self.graph.nodes:
        raise ValueError(f"A component named '{name}' already exists in this pipeline: choose another name.")

    # '_debug' is reserved for debug output.
    if name == "_debug":
        raise ValueError("'_debug' is a reserved name for debug output. Choose another name.")

    # Only objects produced by the @component decorator qualify.
    if not isinstance(instance, Component):
        raise PipelineValidationError(
            f"'{type(instance)}' doesn't seem to be a component. Is this class decorated with @component?"
        )

    # An instance may belong to at most one pipeline at a time.
    if getattr(instance, "__haystack_added_to_pipeline__", None):
        raise PipelineError(
            "Component has already been added in another Pipeline. Components can't be shared between Pipelines. "
            "Create a new instance instead."
        )

    setattr(instance, "__haystack_added_to_pipeline__", self)

    # Add component to the graph, disconnected
    logger.debug("Adding component '{component_name}' ({component})", component_name=name, component=instance)
    # We're completely sure the fields exist so we ignore the type error
    self.graph.add_node(
        name,
        instance=instance,
        input_sockets=instance.__haystack_input__._sockets_dict,  # type: ignore[attr-defined]
        output_sockets=instance.__haystack_output__._sockets_dict,  # type: ignore[attr-defined]
        visits=0,
    )
def remove_component(self, name: str) -> Component:
    """
    Remove and returns component from the pipeline.

    Remove an existing component from the pipeline by providing its name.
    All edges that connect to the component will also be deleted.

    :param name:
        The name of the component to remove.
    :returns:
        The removed Component instance.

    :raises ValueError:
        If there is no component with that name already in the Pipeline.
    """

    # Check that a component with that name is in the Pipeline
    try:
        instance = self.get_component(name)
    except ValueError as exc:
        # Build a single message string. Previously the joined component names were
        # passed as a SECOND positional argument to ValueError, which made the
        # exception render as a tuple instead of a readable sentence.
        raise ValueError(
            f"There is no component named '{name}' in the pipeline. The valid component names are: "
            + ", ".join(n for n in self.graph.nodes)
        ) from exc

    # Delete component from the graph, deleting all its connections
    self.graph.remove_node(name)

    # Reset the Component sockets' senders and receivers so the instance can be
    # re-added to another Pipeline later as if it were freshly created.
    input_sockets = instance.__haystack_input__._sockets_dict  # type: ignore[attr-defined]
    for socket in input_sockets.values():
        socket.senders = []

    output_sockets = instance.__haystack_output__._sockets_dict  # type: ignore[attr-defined]
    for socket in output_sockets.values():
        socket.receivers = []

    # Reset the Component's pipeline reference
    setattr(instance, "__haystack_added_to_pipeline__", None)

    return instance
def connect(self, sender: str, receiver: str) -> "PipelineBase":  # noqa: PLR0915
    """
    Connects two components together.

    All components to connect must exist in the pipeline.
    If connecting to a component that has several output connections, specify the inputs and output names as
    'component_name.connections_name'.

    :param sender:
        The component that delivers the value. This can be either just a component name or can be
        in the format `component_name.connection_name` if the component has multiple outputs.
    :param receiver:
        The component that receives the value. This can be either just a component name or can be
        in the format `component_name.connection_name` if the component has multiple inputs.
    :returns:
        The Pipeline instance.

    :raises PipelineConnectError:
        If the two components cannot be connected (for example if one of the components is
        not present in the pipeline, or the connections don't match by type, and so on).
    """
    # Edges may be named explicitly by passing 'node_name.edge_name' to connect().
    # When no socket name is given, the socket part comes back falsy and we try to
    # auto-match sockets by type below.
    sender_component_name, sender_socket_name = parse_connect_string(sender)
    receiver_component_name, receiver_socket_name = parse_connect_string(receiver)

    if sender_component_name == receiver_component_name:
        raise PipelineConnectError("Connecting a Component to itself is not supported.")

    # Get the nodes data. A missing node raises ValueError (not PipelineConnectError)
    # so callers can distinguish "unknown component" from "incompatible sockets".
    try:
        from_sockets = self.graph.nodes[sender_component_name]["output_sockets"]
    except KeyError as exc:
        raise ValueError(f"Component named {sender_component_name} not found in the pipeline.") from exc
    try:
        to_sockets = self.graph.nodes[receiver_component_name]["input_sockets"]
    except KeyError as exc:
        raise ValueError(f"Component named {receiver_component_name} not found in the pipeline.") from exc

    # If the name of either socket is given, get the socket
    sender_socket: Optional[OutputSocket] = None
    if sender_socket_name:
        sender_socket = from_sockets.get(sender_socket_name)
        if not sender_socket:
            raise PipelineConnectError(
                f"'{sender} does not exist. "
                f"Output connections of {sender_component_name} are: "
                + ", ".join([f"{name} (type {_type_name(socket.type)})" for name, socket in from_sockets.items()])
            )

    receiver_socket: Optional[InputSocket] = None
    if receiver_socket_name:
        receiver_socket = to_sockets.get(receiver_socket_name)
        if not receiver_socket:
            raise PipelineConnectError(
                f"'{receiver} does not exist. "
                f"Input connections of {receiver_component_name} are: "
                + ", ".join([f"{name} (type {_type_name(socket.type)})" for name, socket in to_sockets.items()])
            )

    # Look for a matching connection among the possible ones.
    # Note that if there is more than one possible connection but two sockets match by name, they're paired.
    sender_socket_candidates: List[OutputSocket] = [sender_socket] if sender_socket else list(from_sockets.values())
    receiver_socket_candidates: List[InputSocket] = (
        [receiver_socket] if receiver_socket else list(to_sockets.values())
    )

    # Find all possible connections between these two components
    possible_connections = [
        (sender_sock, receiver_sock)
        for sender_sock, receiver_sock in itertools.product(sender_socket_candidates, receiver_socket_candidates)
        if _types_are_compatible(sender_sock.type, receiver_sock.type)
    ]

    # We need this status for error messages, since we might need it in multiple places we calculate it here
    status = _connections_status(
        sender_node=sender_component_name,
        sender_sockets=sender_socket_candidates,
        receiver_node=receiver_component_name,
        receiver_sockets=receiver_socket_candidates,
    )

    if not possible_connections:
        # There's no possible connection between these two components
        if len(sender_socket_candidates) == len(receiver_socket_candidates) == 1:
            msg = (
                f"Cannot connect '{sender_component_name}.{sender_socket_candidates[0].name}' with "
                f"'{receiver_component_name}.{receiver_socket_candidates[0].name}': "
                f"their declared input and output types do not match.\n{status}"
            )
        else:
            msg = (
                f"Cannot connect '{sender_component_name}' with '{receiver_component_name}': "
                f"no matching connections available.\n{status}"
            )
        raise PipelineConnectError(msg)

    if len(possible_connections) == 1:
        # There's only one possible connection, use it
        sender_socket = possible_connections[0][0]
        receiver_socket = possible_connections[0][1]

    if len(possible_connections) > 1:
        # There are multiple possible connection, let's try to match them by name
        name_matches = [
            (out_sock, in_sock) for out_sock, in_sock in possible_connections if in_sock.name == out_sock.name
        ]
        if len(name_matches) != 1:
            # There's are either no matches or more than one, we can't pick one reliably
            msg = (
                f"Cannot connect '{sender_component_name}' with "
                f"'{receiver_component_name}': more than one connection is possible "
                "between these components. Please specify the connection name, like: "
                f"pipeline.connect('{sender_component_name}.{possible_connections[0][0].name}', "
                f"'{receiver_component_name}.{possible_connections[0][1].name}').\n{status}"
            )
            raise PipelineConnectError(msg)

        # Get the only possible match
        sender_socket = name_matches[0][0]
        receiver_socket = name_matches[0][1]

    # Connection must be valid on both sender/receiver sides
    if not sender_socket or not receiver_socket or not sender_component_name or not receiver_component_name:
        if sender_component_name and sender_socket:
            sender_repr = f"{sender_component_name}.{sender_socket.name} ({_type_name(sender_socket.type)})"
        else:
            sender_repr = "input needed"

        if receiver_component_name and receiver_socket:
            receiver_repr = f"({_type_name(receiver_socket.type)}) {receiver_component_name}.{receiver_socket.name}"
        else:
            receiver_repr = "output"
        msg = f"Connection must have both sender and receiver: {sender_repr} -> {receiver_repr}"
        raise PipelineConnectError(msg)

    logger.debug(
        "Connecting '{sender_component}.{sender_socket_name}' to '{receiver_component}.{receiver_socket_name}'",
        sender_component=sender_component_name,
        sender_socket_name=sender_socket.name,
        receiver_component=receiver_component_name,
        receiver_socket_name=receiver_socket.name,
    )

    if receiver_component_name in sender_socket.receivers and sender_component_name in receiver_socket.senders:
        # This is already connected, nothing to do
        return self

    if receiver_socket.senders and not receiver_socket.is_variadic:
        # Only variadic input sockets can receive from multiple senders
        msg = (
            f"Cannot connect '{sender_component_name}.{sender_socket.name}' with "
            f"'{receiver_component_name}.{receiver_socket.name}': "
            f"{receiver_component_name}.{receiver_socket.name} is already connected to {receiver_socket.senders}.\n"
        )
        raise PipelineConnectError(msg)

    # Update the sockets with the new connection
    sender_socket.receivers.append(receiver_component_name)
    receiver_socket.senders.append(sender_component_name)

    # Create the new connection. The edge key encodes both socket names so multiple
    # distinct connections between the same two nodes can coexist in the multigraph.
    self.graph.add_edge(
        sender_component_name,
        receiver_component_name,
        key=f"{sender_socket.name}/{receiver_socket.name}",
        conn_type=_type_name(sender_socket.type),
        from_socket=sender_socket,
        to_socket=receiver_socket,
        mandatory=receiver_socket.is_mandatory,
    )
    return self
def get_component(self, name: str) -> Component:
    """
    Get the component with the specified name from the pipeline.

    :param name:
        The name of the component.
    :returns:
        The instance of that component.

    :raises ValueError:
        If a component with that name is not present in the pipeline.
    """
    # EAFP: look the node up and translate a missing key into a ValueError.
    try:
        node_data = self.graph.nodes[name]
    except KeyError as err:
        raise ValueError(f"Component named {name} not found in the pipeline.") from err
    return node_data["instance"]
def get_component_name(self, instance: Component) -> str:
    """
    Returns the name of the Component instance if it has been added to this Pipeline or an empty string otherwise.

    :param instance:
        The Component instance to look for.
    :returns:
        The name of the Component instance.
    """
    # Take the first node whose stored instance compares equal; "" when absent.
    matches = (
        name
        for name, inst in self.graph.nodes(data="instance")  # type: ignore # type wrongly defined in networkx
        if inst == instance
    )
    return next(matches, "")
def inputs(self, include_components_with_connected_inputs: bool = False) -> Dict[str, Dict[str, Any]]:
    """
    Returns a dictionary containing the inputs of a pipeline.

    Each key in the dictionary corresponds to a component name, and its value is another dictionary that describes
    the input sockets of that component, including their types and whether they are optional.

    :param include_components_with_connected_inputs:
        If `False`, only components that have disconnected input edges are
        included in the output.
    :returns:
        A dictionary where each key is a pipeline component name and each value is a dictionary of
        inputs sockets of that component.
    """
    inputs: Dict[str, Dict[str, Any]] = {}
    found = find_pipeline_inputs(self.graph, include_components_with_connected_inputs)
    for component_name, sockets in found.items():
        description: Dict[str, Any] = {}
        for socket in sockets:
            entry: Dict[str, Any] = {"type": socket.type, "is_mandatory": socket.is_mandatory}
            # Optional sockets also expose their default so callers know what's applied.
            if not socket.is_mandatory:
                entry["default_value"] = socket.default_value
            description[socket.name] = entry
        # Components without any matching socket are left out entirely.
        if description:
            inputs[component_name] = description
    return inputs
def outputs(self, include_components_with_connected_outputs: bool = False) -> Dict[str, Dict[str, Any]]:
    """
    Returns a dictionary containing the outputs of a pipeline.

    Each key in the dictionary corresponds to a component name, and its value is another dictionary that describes
    the output sockets of that component.

    :param include_components_with_connected_outputs:
        If `False`, only components that have disconnected output edges are
        included in the output.
    :returns:
        A dictionary where each key is a pipeline component name and each value is a dictionary of
        output sockets of that component.
    """
    outputs: Dict[str, Dict[str, Any]] = {}
    found = find_pipeline_outputs(self.graph, include_components_with_connected_outputs)
    for comp, sockets in found.items():
        # Components with no qualifying sockets are omitted.
        if not sockets:
            continue
        outputs[comp] = {socket.name: {"type": socket.type} for socket in sockets}
    return outputs
def show(self) -> None:
    """
    If running in a Jupyter notebook, display an image representing this `Pipeline`.

    """
    # Guard clause: rendering inline only makes sense inside a notebook.
    if not is_in_jupyter():
        msg = "This method is only supported in Jupyter notebooks. Use Pipeline.draw() to save an image locally."
        raise PipelineDrawingError(msg)

    # Imported lazily: IPython is only guaranteed to be importable in a notebook.
    from IPython.display import Image, display  # type: ignore

    display(Image(_to_mermaid_image(self.graph)))
def draw(self, path: Path) -> None:
    """
    Save an image representing this `Pipeline` to `path`.

    :param path:
        The path to save the image to.
    """
    # Before drawing we edit a bit the graph, to avoid modifying the original that is
    # used for running the pipeline we copy it.
    # The rendering itself is delegated to the Mermaid helper, which returns raw
    # image bytes; we simply persist them to disk.
    image_data = _to_mermaid_image(self.graph)
    Path(path).write_bytes(image_data)
def walk(self) -> Iterator[Tuple[str, Component]]:
    """
    Visits each component in the pipeline exactly once and yields its name and instance.

    No guarantees are provided on the visiting order.

    :returns:
        An iterator of tuples of component name and component instance.
    """
    # networkx already yields (name, instance) pairs when asked for the "instance" attribute.
    yield from self.graph.nodes(data="instance")  # type: ignore # type is wrong in networkx
def warm_up(self):
    """
    Make sure all nodes are warm.

    It's the node's responsibility to make sure this method can be called at every `Pipeline.run()`
    without re-initializing everything.
    """
    for node in self.graph.nodes:
        component_instance = self.graph.nodes[node]["instance"]
        # Only components that opt in by defining warm_up() are touched.
        if hasattr(component_instance, "warm_up"):
            logger.info("Warming up component {node}...", node=node)
            component_instance.warm_up()
def _validate_input(self, data: Dict[str, Any]):
|
| 673 |
+
"""
|
| 674 |
+
Validates pipeline input data.
|
| 675 |
+
|
| 676 |
+
Validates that data:
|
| 677 |
+
* Each Component name actually exists in the Pipeline
|
| 678 |
+
* Each Component is not missing any input
|
| 679 |
+
* Each Component has only one input per input socket, if not variadic
|
| 680 |
+
* Each Component doesn't receive inputs that are already sent by another Component
|
| 681 |
+
|
| 682 |
+
:param data:
|
| 683 |
+
A dictionary of inputs for the pipeline's components. Each key is a component name.
|
| 684 |
+
|
| 685 |
+
:raises ValueError:
|
| 686 |
+
If inputs are invalid according to the above.
|
| 687 |
+
"""
|
| 688 |
+
for component_name, component_inputs in data.items():
|
| 689 |
+
if component_name not in self.graph.nodes:
|
| 690 |
+
raise ValueError(f"Component named {component_name} not found in the pipeline.")
|
| 691 |
+
instance = self.graph.nodes[component_name]["instance"]
|
| 692 |
+
for socket_name, socket in instance.__haystack_input__._sockets_dict.items():
|
| 693 |
+
if socket.senders == [] and socket.is_mandatory and socket_name not in component_inputs:
|
| 694 |
+
raise ValueError(f"Missing input for component {component_name}: {socket_name}")
|
| 695 |
+
for input_name in component_inputs.keys():
|
| 696 |
+
if input_name not in instance.__haystack_input__._sockets_dict:
|
| 697 |
+
raise ValueError(f"Input {input_name} not found in component {component_name}.")
|
| 698 |
+
|
| 699 |
+
for component_name in self.graph.nodes:
|
| 700 |
+
instance = self.graph.nodes[component_name]["instance"]
|
| 701 |
+
for socket_name, socket in instance.__haystack_input__._sockets_dict.items():
|
| 702 |
+
component_inputs = data.get(component_name, {})
|
| 703 |
+
if socket.senders == [] and socket.is_mandatory and socket_name not in component_inputs:
|
| 704 |
+
raise ValueError(f"Missing input for component {component_name}: {socket_name}")
|
| 705 |
+
if socket.senders and socket_name in component_inputs and not socket.is_variadic:
|
| 706 |
+
raise ValueError(
|
| 707 |
+
f"Input {socket_name} for component {component_name} is already sent by {socket.senders}."
|
| 708 |
+
)
|
| 709 |
+
|
| 710 |
+
def _prepare_component_input_data(self, data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
|
| 711 |
+
"""
|
| 712 |
+
Prepares input data for pipeline components.
|
| 713 |
+
|
| 714 |
+
Organizes input data for pipeline components and identifies any inputs that are not matched to any
|
| 715 |
+
component's input slots. Deep-copies data items to avoid sharing mutables across multiple components.
|
| 716 |
+
|
| 717 |
+
This method processes a flat dictionary of input data, where each key-value pair represents an input name
|
| 718 |
+
and its corresponding value. It distributes these inputs to the appropriate pipeline components based on
|
| 719 |
+
their input requirements. Inputs that don't match any component's input slots are classified as unresolved.
|
| 720 |
+
|
| 721 |
+
:param data:
|
| 722 |
+
A dictionary potentially having input names as keys and input values as values.
|
| 723 |
+
|
| 724 |
+
:returns:
|
| 725 |
+
A dictionary mapping component names to their respective matched inputs.
|
| 726 |
+
"""
|
| 727 |
+
# check whether the data is a nested dictionary of component inputs where each key is a component name
|
| 728 |
+
# and each value is a dictionary of input parameters for that component
|
| 729 |
+
is_nested_component_input = all(isinstance(value, dict) for value in data.values())
|
| 730 |
+
if not is_nested_component_input:
|
| 731 |
+
# flat input, a dict where keys are input names and values are the corresponding values
|
| 732 |
+
# we need to convert it to a nested dictionary of component inputs and then run the pipeline
|
| 733 |
+
# just like in the previous case
|
| 734 |
+
pipeline_input_data: Dict[str, Dict[str, Any]] = defaultdict(dict)
|
| 735 |
+
unresolved_kwargs = {}
|
| 736 |
+
|
| 737 |
+
# Retrieve the input slots for each component in the pipeline
|
| 738 |
+
available_inputs: Dict[str, Dict[str, Any]] = self.inputs()
|
| 739 |
+
|
| 740 |
+
# Go through all provided to distribute them to the appropriate component inputs
|
| 741 |
+
for input_name, input_value in data.items():
|
| 742 |
+
resolved_at_least_once = False
|
| 743 |
+
|
| 744 |
+
# Check each component to see if it has a slot for the current kwarg
|
| 745 |
+
for component_name, component_inputs in available_inputs.items():
|
| 746 |
+
if input_name in component_inputs:
|
| 747 |
+
# If a match is found, add the kwarg to the component's input data
|
| 748 |
+
pipeline_input_data[component_name][input_name] = input_value
|
| 749 |
+
resolved_at_least_once = True
|
| 750 |
+
|
| 751 |
+
if not resolved_at_least_once:
|
| 752 |
+
unresolved_kwargs[input_name] = input_value
|
| 753 |
+
|
| 754 |
+
if unresolved_kwargs:
|
| 755 |
+
logger.warning(
|
| 756 |
+
"Inputs {input_keys} were not matched to any component inputs, please check your run parameters.",
|
| 757 |
+
input_keys=list(unresolved_kwargs.keys()),
|
| 758 |
+
)
|
| 759 |
+
|
| 760 |
+
data = dict(pipeline_input_data)
|
| 761 |
+
|
| 762 |
+
# deepcopying the inputs prevents the Pipeline run logic from being altered unexpectedly
|
| 763 |
+
# when the same input reference is passed to multiple components.
|
| 764 |
+
for component_name, component_inputs in data.items():
|
| 765 |
+
data[component_name] = {k: deepcopy(v) for k, v in component_inputs.items()}
|
| 766 |
+
|
| 767 |
+
return data
|
| 768 |
+
|
| 769 |
+
def _normalize_varidiac_input_data(self, data: Dict[str, Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
|
| 770 |
+
"""
|
| 771 |
+
Variadic inputs expect their value to be a list, this utility method creates that list from the user's input.
|
| 772 |
+
"""
|
| 773 |
+
for component_name, component_inputs in data.items():
|
| 774 |
+
if component_name not in self.graph.nodes:
|
| 775 |
+
# This is not a component name, it must be the name of one or more input sockets.
|
| 776 |
+
# Those are handled in a different way, so we skip them here.
|
| 777 |
+
continue
|
| 778 |
+
instance = self.graph.nodes[component_name]["instance"]
|
| 779 |
+
for component_input, input_value in component_inputs.items():
|
| 780 |
+
if instance.__haystack_input__._sockets_dict[component_input].is_variadic:
|
| 781 |
+
# Components that have variadic inputs need to receive lists as input.
|
| 782 |
+
# We don't want to force the user to always pass lists, so we convert single values to lists here.
|
| 783 |
+
# If it's already a list we assume the component takes a variadic input of lists, so we
|
| 784 |
+
# convert it in any case.
|
| 785 |
+
data[component_name][component_input] = [input_value]
|
| 786 |
+
|
| 787 |
+
return {**data}
|
| 788 |
+
|
| 789 |
+
@classmethod
def from_template(
    cls, predefined_pipeline: PredefinedPipeline, template_params: Optional[Dict[str, Any]] = None
) -> "PipelineBase":
    """
    Create a Pipeline from a predefined template. See `PredefinedPipeline` for available options.

    :param predefined_pipeline:
        The predefined pipeline to use.
    :param template_params:
        An optional dictionary of parameters to use when rendering the pipeline template.
    :returns:
        An instance of `Pipeline`.
    """
    template = PipelineTemplate.from_predefined(predefined_pipeline)
    # If tpl.render() fails, we let bubble up the original error
    rendered = template.render(template_params)

    # If there was a problem with the rendered version of the
    # template, we add it to the error stack for debugging
    try:
        return cls.loads(rendered)
    except Exception as e:
        msg = f"Error unmarshalling pipeline: {e}\n" + f"Source:\n{rendered}"
        raise PipelineUnmarshalError(msg)
def _init_graph(self):
|
| 817 |
+
"""Resets the visits count for each component"""
|
| 818 |
+
for node in self.graph.nodes:
|
| 819 |
+
self.graph.nodes[node]["visits"] = 0
|
| 820 |
+
|
| 821 |
+
def _find_receivers_from(self, component_name: str) -> List[Tuple[str, OutputSocket, InputSocket]]:
    """
    Utility function to find all Components that receive input form `component_name`.

    :param component_name:
        Name of the sender Component

    :returns:
        List of tuples containing name of the receiver Component and sender OutputSocket
        and receiver InputSocket instances
    """
    # Walk the outgoing edges of the sender node; each edge stores both socket
    # objects in its attribute dict.
    receivers = []
    for _, receiver_name, connection in self.graph.edges(nbunch=component_name, data=True):
        receivers.append((receiver_name, connection["from_socket"], connection["to_socket"]))
    return receivers
def _distribute_output(  # pylint: disable=too-many-positional-arguments
    self,
    receiver_components: List[Tuple[str, OutputSocket, InputSocket]],
    component_result: Dict[str, Any],
    components_inputs: Dict[str, Dict[str, Any]],
    run_queue: List[Tuple[str, Component]],
    waiting_queue: List[Tuple[str, Component]],
) -> Dict[str, Any]:
    """
    Distributes the output of a Component to the next Components that need it.

    This also updates the queues that keep track of which Components are ready to run and which are waiting for
    input.

    :param receiver_components:
        List of tuples containing name of receiver Components and relative sender OutputSocket
        and receiver InputSocket instances
    :param component_result:
        The output of the Component
    :param components_inputs:
        The current state of the inputs divided by Component name
    :param run_queue:
        Queue of Components to run
    :param waiting_queue:
        Queue of Components waiting for input

    :returns:
        The updated output of the Component without the keys that were distributed to other Components
    """
    # We keep track of which keys to remove from component_result at the end of the loop.
    # This is done after the output has been distributed to the next components, so that
    # we're sure all components that need this output have received it.
    to_remove_from_component_result = set()

    for receiver_name, sender_socket, receiver_socket in receiver_components:
        if sender_socket.name not in component_result:
            # This output wasn't created by the sender, nothing we can do.
            #
            # Some Components might have conditional outputs, so we need to check if they actually returned
            # some output while iterating over their output sockets.
            #
            # A perfect example of this would be the ConditionalRouter, which will have an output for each
            # condition it has been initialized with.
            # Though it will return only one output at a time.
            continue

        if receiver_name not in components_inputs:
            components_inputs[receiver_name] = {}

        # We keep track of the keys that were distributed to other Components.
        # This key will be removed from component_result at the end of the loop.
        to_remove_from_component_result.add(sender_socket.name)

        value = component_result[sender_socket.name]

        if receiver_socket.is_variadic:
            # Usually Component inputs can only be received from one sender, the Variadic type allows
            # instead to receive inputs from multiple senders.
            #
            # To keep track of all the inputs received internally we always store them in a list.
            if receiver_socket.name not in components_inputs[receiver_name]:
                # Create the list if it doesn't exist
                components_inputs[receiver_name][receiver_socket.name] = []
            else:
                # Check if the value is actually a list
                assert isinstance(components_inputs[receiver_name][receiver_socket.name], list)
            components_inputs[receiver_name][receiver_socket.name].append(value)
        else:
            # Non-variadic sockets hold a single value; a later sender for the same
            # socket would overwrite an earlier one.
            components_inputs[receiver_name][receiver_socket.name] = value

        receiver = self.graph.nodes[receiver_name]["instance"]
        pair = (receiver_name, receiver)

        if receiver_socket.is_variadic:
            if receiver_socket.is_greedy:
                # If the receiver is greedy, we can run it as soon as possible.
                # First we remove it from the status lists it's in if it's there or
                # we risk running it multiple times.
                if pair in run_queue:
                    run_queue.remove(pair)
                if pair in waiting_queue:
                    waiting_queue.remove(pair)
                # Front of the queue so the greedy receiver runs before anything else.
                run_queue.insert(0, pair)
            else:
                # If the receiver Component has a variadic input that is not greedy
                # we put it in the waiting queue.
                # This make sure that we don't run it earlier than necessary and we can collect
                # as many inputs as we can before running it.
                if pair not in waiting_queue:
                    waiting_queue.append(pair)

        if pair not in waiting_queue and pair not in run_queue:
            # Queue up the Component that received this input to run, only if it's not already waiting
            # for input or already ready to run.
            run_queue.append(pair)

    # Returns the output without the keys that were distributed to other Components
    return {k: v for k, v in component_result.items() if k not in to_remove_from_component_result}
def _find_next_runnable_component(
    self, components_inputs: Dict[str, Dict[str, Any]], waiting_queue: List[Tuple[str, Component]]
) -> Tuple[str, Component]:
    """
    Finds the next Component that can be run and returns it.

    :param components_inputs: The current state of the inputs divided by Component name
    :param waiting_queue: Queue of Components waiting for input

    :returns: The name and the instance of the next Component that can be run
    """
    all_lazy_variadic = True
    all_with_default_inputs = True

    filtered_waiting_queue = []

    for name, comp in waiting_queue:
        if not _is_lazy_variadic(comp):
            # Components with variadic inputs that are not greedy must be removed only if there's nothing else to
            # run at this stage.
            # We need to wait as long as possible to run them, so we can collect as most inputs as we can.
            all_lazy_variadic = False

        if not _has_all_inputs_with_defaults(comp):
            # Components that have defaults for all their inputs must be treated the same identical way as we treat
            # lazy variadic components. If there are only components with defaults we can run them.
            # If we don't do this the order of execution of the Pipeline's Components will be affected cause we
            # enqueue the Components in `run_queue` at the start using the order they are added in the Pipeline.
            # If a Component A with defaults is added before a Component B that has no defaults, but in the Pipeline
            # logic A must be executed after B. However, B could run before A if we don't do this check.
            all_with_default_inputs = False

        if not _is_lazy_variadic(comp) and not _has_all_inputs_with_defaults(comp):
            # Keep track of the Components that are not lazy variadic and don't have all inputs with defaults.
            # We'll handle these later if necessary.
            filtered_waiting_queue.append((name, comp))

    # If all Components are lazy variadic or all Components have all inputs with defaults we can get one to run
    if all_lazy_variadic or all_with_default_inputs:
        return waiting_queue[0]

    for name, comp in filtered_waiting_queue:
        # Find the first component that has all the inputs it needs to run
        has_enough_inputs = True
        for input_socket in comp.__haystack_input__._sockets_dict.values():  # type: ignore
            if input_socket.name not in components_inputs.get(name, {}) and input_socket.is_mandatory:
                has_enough_inputs = False
                break

        if has_enough_inputs:
            return name, comp

    # If we reach this point it means that we found no Component that has enough inputs to run.
    # Ideally we should never reach this point, though we can't raise an exception either as
    # existing use cases rely on this behavior.
    # So we return the last Component, that could be the last from waiting_queue or filtered_waiting_queue.
    # NOTE(review): `name`/`comp` are the variables left over from whichever loop ran last;
    # if `waiting_queue` were empty this would raise UnboundLocalError — presumably callers
    # guarantee a non-empty queue (confirm against call sites).
    return name, comp
def _find_next_runnable_lazy_variadic_or_default_component(
    self, waiting_queue: List[Tuple[str, Component]]
) -> Tuple[str, Component]:
    """
    Finds the next Component that can be run and has a lazy variadic input or all inputs with default values.

    :param waiting_queue: Queue of Components waiting for input

    :returns: The name and the instance of the next Component that can be run
    """
    for name, comp in waiting_queue:
        is_lazy_variadic = _is_lazy_variadic(comp)
        has_only_defaults = _has_all_inputs_with_defaults(comp)
        if is_lazy_variadic or has_only_defaults:
            return name, comp

    # If we reach this point it means that we found no Component that has a lazy variadic input or all inputs with
    # default values to run.
    # Similar to `_find_next_runnable_component` we might not find the Component we want, so we optimistically
    # return the last Component in the list.
    # We're probably stuck in a loop in this case, but we can't raise an exception as existing use cases might
    # rely on this behaviour.
    # The loop detection will be handled later on.
    # NOTE(review): `name`/`comp` deliberately leak from the loop above; an empty
    # `waiting_queue` would raise UnboundLocalError here — confirm callers never pass one.
    return name, comp
def _find_components_that_will_receive_no_input(
    self, component_name: str, component_result: Dict[str, Any], components_inputs: Dict[str, Dict[str, Any]]
) -> Set[Tuple[str, Component]]:
    """
    Find all the Components that are connected to component_name and didn't receive any input from it.

    Components that have a Variadic input and received already some input from other Components
    but not from component_name won't be returned as they have enough inputs to run.

    This includes the descendants of the Components that didn't receive any input from component_name.
    That is necessary to avoid getting stuck into infinite loops waiting for inputs that will never arrive.

    :param component_name: Name of the Component that created the output
    :param component_result: Output of the Component
    :param components_inputs: The current state of the inputs divided by Component name
    :return: A set of Components that didn't receive any input from component_name
    """

    # Simplifies the check if a Component is Variadic and received some input from other Components.
    # Note the argument order: `component_name` is the *receiver* being inspected,
    # `sender_name` is the node whose connection we are checking.
    def has_variadic_socket_with_existing_inputs(
        component: Component, component_name: str, sender_name: str, components_inputs: Dict[str, Dict[str, Any]]
    ) -> bool:
        for socket in component.__haystack_input__._sockets_dict.values():  # type: ignore
            if sender_name not in socket.senders:
                continue
            # Variadic sockets accumulate inputs in a list; a non-empty list means
            # the component already has something to work with.
            if socket.is_variadic and len(components_inputs.get(component_name, {}).get(socket.name, [])) > 0:
                return True
        return False

    # Makes it easier to verify if all connections between two Components are optional
    def all_connections_are_optional(sender_name: str, receiver: Component) -> bool:
        for socket in receiver.__haystack_input__._sockets_dict.values():  # type: ignore
            if sender_name not in socket.senders:
                continue
            if socket.is_mandatory:
                return False
        return True

    # Eases checking if other connections that are not between sender_name and receiver_name
    # already received inputs
    def other_connections_received_input(sender_name: str, receiver_name: str) -> bool:
        receiver: Component = self.graph.nodes[receiver_name]["instance"]
        for receiver_socket in receiver.__haystack_input__._sockets_dict.values():  # type: ignore
            if sender_name in receiver_socket.senders:
                # Skip the sockets fed by sender_name itself; we only care about the others.
                continue
            if components_inputs.get(receiver_name, {}).get(receiver_socket.name) is not None:
                return True
        return False

    components = set()
    instance: Component = self.graph.nodes[component_name]["instance"]
    # Only output sockets that produced *no* value this run can starve their receivers.
    for socket_name, socket in instance.__haystack_output__._sockets_dict.items():  # type: ignore
        if socket_name in component_result:
            continue
        for receiver in socket.receivers:
            receiver_instance: Component = self.graph.nodes[receiver]["instance"]

            if has_variadic_socket_with_existing_inputs(
                receiver_instance, receiver, component_name, components_inputs
            ):
                # Components with Variadic input that already received some input
                # can still run, even if branch is skipped.
                # If we remove them they won't run.
                continue

            if all_connections_are_optional(component_name, receiver_instance) and other_connections_received_input(
                component_name, receiver
            ):
                # If all the connections between component_name and receiver are optional
                # and receiver received other inputs already it still has enough inputs to run.
                # Even if it didn't receive input from component_name, so we can't remove it or its
                # descendants.
                continue

            components.add((receiver, receiver_instance))
            # Get the descendants too. When we remove a Component that received no input
            # it's extremely likely that its descendants will receive no input as well.
            # This is fine even if the Pipeline will merge back into a single Component
            # at a certain point. The merging Component will be put back into the run
            # queue at a later stage.
            for descendant_name in networkx.descendants(self.graph, receiver):
                descendant = self.graph.nodes[descendant_name]["instance"]

                # Components with Variadic input that already received some input
                # can still run, even if branch is skipped.
                # If we remove them they won't run.
                if has_variadic_socket_with_existing_inputs(
                    descendant, descendant_name, receiver, components_inputs
                ):
                    continue

                components.add((descendant_name, descendant))

    return components
def _is_stuck_in_a_loop(self, waiting_queue: List[Tuple[str, Component]]) -> bool:
    """
    Checks if the Pipeline is stuck in a loop.

    :param waiting_queue: Queue of Components waiting for input

    :returns: True if the Pipeline is stuck in a loop, False otherwise
    """
    # A lazy variadic Component, or one whose inputs all have defaults, can still
    # execute without receiving fresh input. As long as at least one of those is
    # waiting, progress is still possible.
    can_still_progress = any(
        _is_lazy_variadic(component) or _has_all_inputs_with_defaults(component)
        for _, component in waiting_queue
    )
    if not can_still_progress:
        # No Component left that could ever run without new input: truly stuck, bail out.
        return True

    # A single remaining waiter has been waiting for at least two iterations already
    # and will never receive input, so that also counts as being stuck.
    return len(waiting_queue) == 1
def _component_has_enough_inputs_to_run(self, name: str, inputs: Dict[str, Dict[str, Any]]) -> bool:
    """
    Returns True if the Component has all the inputs it needs to run.

    :param name: Name of the Component as defined in the Pipeline.
    :param inputs: The current state of the inputs divided by Component name.

    :return: Whether the Component can run or not.
    """
    instance: Component = self.graph.nodes[name]["instance"]
    if name not in inputs:
        # No input at all has been routed to this Component yet.
        return False
    # dict key views compare as sets, so this checks that exactly the declared
    # input sockets have received a value.
    return instance.__haystack_input__._sockets_dict.keys() == inputs[name].keys()  # type: ignore
def _break_supported_cycles_in_graph(self) -> Tuple[networkx.MultiDiGraph, Dict[str, List[List[str]]]]:
    """
    Utility function to remove supported cycles in the Pipeline's graph.

    Given that the Pipeline execution would wait to run a Component until it has received
    all its mandatory inputs, it doesn't make sense for us to try and break cycles by
    removing a connection to a mandatory input. The Pipeline would just get stuck at a later time.

    So we can only break connections in cycles that have a Variadic or GreedyVariadic type or a default value.

    This will raise a PipelineRuntimeError if there are cycles that can't be broken.
    That is bound to happen when at least one of the inputs in a cycle is mandatory.

    If the Pipeline's graph doesn't have any cycle it will just return that graph and an empty dictionary.

    :returns:
        A tuple containing:
        * A copy of the Pipeline's graph without cycles
        * A dictionary of Component's names and a list of all the cycles they were part of.
          The cycles are a list of Component's names that create that cycle.
    """
    if networkx.is_directed_acyclic_graph(self.graph):
        # Fast path: nothing to break.
        return self.graph, {}

    temp_graph: networkx.MultiDiGraph = self.graph.copy()
    # A list of all the cycles that are found in the graph, each inner list contains
    # the Component names that create that cycle.
    cycles: List[List[str]] = list(networkx.simple_cycles(self.graph))
    # Maps a Component name to a list of its output socket names that have been broken
    edges_removed: Dict[str, List[str]] = defaultdict(list)
    # This keeps track of all the cycles that a component is part of.
    # Maps a Component name to a list of cycles, each inner list contains
    # the Component names that create that cycle (the key will also be
    # an element in each list). The last Component in each list is implicitly
    # connected to the first.
    components_in_cycles: Dict[str, List[List[str]]] = defaultdict(list)

    # Used to minimize the number of time we check whether the graph has any more
    # cycles left to break or not.
    graph_has_cycles = True

    # Iterate all the cycles to find the least amount of connections that we can remove
    # to make the Pipeline graph acyclic.
    # As soon as the graph is acyclic we stop breaking connections and return.
    for cycle in cycles:
        for comp in cycle:
            components_in_cycles[comp].append(cycle)

        # Iterate this cycle, we zip the cycle with itself so that at the last iteration
        # sender_comp will be the last element of cycle and receiver_comp will be the first.
        # So if cycle is [1, 2, 3, 4] we would call zip([1, 2, 3, 4], [2, 3, 4, 1]).
        for sender_comp, receiver_comp in zip(cycle, cycle[1:] + cycle[:1]):
            # We get the key and iterate those as we want to edit the graph data while
            # iterating the edges and that would raise.
            # Even though the connection key set in Pipeline.connect() uses only the
            # sockets name we don't have clashes since it's only used to differentiate
            # multiple edges between two nodes.
            edge_keys = list(temp_graph.get_edge_data(sender_comp, receiver_comp).keys())
            for edge_key in edge_keys:
                edge_data = temp_graph.get_edge_data(sender_comp, receiver_comp)[edge_key]
                receiver_socket = edge_data["to_socket"]
                if not receiver_socket.is_variadic and receiver_socket.is_mandatory:
                    # A mandatory non-variadic input can't be severed: the receiver
                    # would deadlock waiting for it.
                    continue

                # We found a breakable edge
                sender_socket = edge_data["from_socket"]
                edges_removed[sender_comp].append(sender_socket.name)
                temp_graph.remove_edge(sender_comp, receiver_comp, edge_key)

        graph_has_cycles = not networkx.is_directed_acyclic_graph(temp_graph)
        if not graph_has_cycles:
            # We removed all the cycles, we can stop
            break

        if not graph_has_cycles:
            # We removed all the cycles, nice
            break

    if graph_has_cycles:
        msg = "Pipeline contains a cycle that we can't execute"
        raise PipelineRuntimeError(msg)

    return temp_graph, components_in_cycles
def _connections_status(
    sender_node: str, receiver_node: str, sender_sockets: List[OutputSocket], receiver_sockets: List[InputSocket]
):
    """
    Lists the status of the sockets, for error messages.
    """
    # One bullet per sender output socket: name and type.
    sender_sockets_list = "\n".join(
        f" - {socket.name}: {_type_name(socket.type)}" for socket in sender_sockets
    )

    # One bullet per receiver input socket: name, type, and whether the socket
    # is still free or already fed by other senders.
    receiver_entries = []
    for socket in receiver_sockets:
        sender_status = f"sent by {','.join(socket.senders)}" if socket.senders else "available"
        receiver_entries.append(f" - {socket.name}: {_type_name(socket.type)} ({sender_status})")
    receiver_sockets_list = "\n".join(receiver_entries)

    return f"'{sender_node}':\n{sender_sockets_list}\n'{receiver_node}':\n{receiver_sockets_list}"
def _is_lazy_variadic(c: Component) -> bool:
    """
    Small utility function to check if a Component has at least a Variadic input and no GreedyVariadic input.
    """
    sockets = c.__haystack_input__._sockets_dict.values()  # type: ignore
    # "Lazy variadic" means: at least one variadic input socket, and none of the
    # sockets is greedy (greedy variadic components are scheduled eagerly instead).
    has_variadic = any(socket.is_variadic for socket in sockets)
    has_greedy = any(socket.is_greedy for socket in sockets)
    return has_variadic and not has_greedy
def _has_all_inputs_with_defaults(c: Component) -> bool:
    """
    Small utility function to check if a Component has all inputs with defaults.
    """
    # A socket that is not mandatory carries a default value, so the Component
    # can run even when none of its inputs were explicitly provided.
    sockets = c.__haystack_input__._sockets_dict.values()  # type: ignore
    return not any(socket.is_mandatory for socket in sockets)
def _add_missing_input_defaults(name: str, comp: Component, components_inputs: Dict[str, Dict[str, Any]]):
    """
    Updates the inputs with the default values for the inputs that are missing

    :param name: Name of the Component
    :param comp: Instance of the Component
    :param components_inputs: The current state of the inputs divided by Component name
    """
    # Make sure the Component has an entry to fill in, then only add defaults
    # for optional sockets that haven't received an explicit value yet.
    component_inputs = components_inputs.setdefault(name, {})
    for input_socket in comp.__haystack_input__._sockets_dict.values():  # type: ignore
        if input_socket.is_mandatory:
            # Mandatory sockets have no default to fall back on.
            continue
        component_inputs.setdefault(input_socket.name, input_socket.default_value)
def _enqueue_component(
    component_pair: Tuple[str, Component],
    run_queue: List[Tuple[str, Component]],
    waiting_queue: List[Tuple[str, Component]],
):
    """
    Append a Component in the queue of Components to run if not already in it.

    Remove it from the waiting list if it's there.

    :param component_pair: Tuple of Component name and instance
    :param run_queue: Queue of Components to run
    :param waiting_queue: Queue of Components waiting for input
    """
    # A Component moving to the run queue must stop waiting for input.
    try:
        waiting_queue.remove(component_pair)
    except ValueError:
        pass

    # Avoid scheduling the same Component twice.
    if component_pair not in run_queue:
        run_queue.append(component_pair)
def _dequeue_component(
    component_pair: Tuple[str, Component],
    run_queue: List[Tuple[str, Component]],
    waiting_queue: List[Tuple[str, Component]],
):
    """
    Removes a Component both from the queue of Components to run and the waiting list.

    :param component_pair: Tuple of Component name and instance
    :param run_queue: Queue of Components to run
    :param waiting_queue: Queue of Components waiting for input
    """
    # The Component must disappear from both scheduling lists; each removal is
    # independent and a no-op when the pair is absent.
    for queue in (waiting_queue, run_queue):
        if component_pair in queue:
            queue.remove(component_pair)
def _enqueue_waiting_component(component_pair: Tuple[str, Component], waiting_queue: List[Tuple[str, Component]]):
    """
    Append a Component in the queue of Components that are waiting for inputs if not already in it.

    :param component_pair: Tuple of Component name and instance
    :param waiting_queue: Queue of Components waiting for input
    """
    # Guard against duplicate entries: each Component waits at most once.
    if component_pair in waiting_queue:
        return
    waiting_queue.append(component_pair)
def _dequeue_waiting_component(component_pair: Tuple[str, Component], waiting_queue: List[Tuple[str, Component]]):
    """
    Removes a Component from the queue of Components that are waiting for inputs.

    :param component_pair: Tuple of Component name and instance
    :param waiting_queue: Queue of Components waiting for input
    """
    # Removing a pair that isn't queued is a harmless no-op.
    try:
        waiting_queue.remove(component_pair)
    except ValueError:
        pass
testbed/deepset-ai__haystack/haystack/core/pipeline/descriptions.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Dict, List
|
| 6 |
+
|
| 7 |
+
import networkx # type:ignore
|
| 8 |
+
|
| 9 |
+
from haystack import logging
|
| 10 |
+
from haystack.core.component.types import InputSocket, OutputSocket
|
| 11 |
+
from haystack.core.type_utils import _type_name
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def find_pipeline_inputs(
    graph: networkx.MultiDiGraph, include_connected_sockets: bool = False
) -> Dict[str, List[InputSocket]]:
    """
    Collect components that have disconnected/connected input sockets.

    Note that this method returns *ALL* disconnected input sockets, including all such sockets with default values.
    """
    inputs: Dict[str, List[InputSocket]] = {}
    for name, data in graph.nodes(data=True):
        sockets = data.get("input_sockets", {}).values()
        # Variadic sockets are always exposed; other sockets only when they have no
        # senders, unless connected sockets were explicitly requested.
        inputs[name] = [
            socket for socket in sockets if socket.is_variadic or include_connected_sockets or not socket.senders
        ]
    return inputs
def find_pipeline_outputs(
    graph: networkx.MultiDiGraph, include_connected_sockets: bool = False
) -> Dict[str, List[OutputSocket]]:
    """
    Collect components that have disconnected/connected output sockets. They define the pipeline output.
    """
    outputs: Dict[str, List[OutputSocket]] = {}
    for name, data in graph.nodes(data=True):
        sockets = data.get("output_sockets", {}).values()
        # A socket is part of the pipeline output when nothing consumes it,
        # unless connected sockets were explicitly requested as well.
        outputs[name] = [socket for socket in sockets if include_connected_sockets or not socket.receivers]
    return outputs
def describe_pipeline_inputs(graph: networkx.MultiDiGraph):
    """
    Returns a dictionary with the input names and types that this pipeline accepts.
    """
    described: Dict = {}
    for comp, sockets in find_pipeline_inputs(graph).items():
        if not sockets:
            # Components without exposed input sockets are omitted entirely.
            continue
        described[comp] = {
            socket.name: {"type": socket.type, "is_mandatory": socket.is_mandatory} for socket in sockets
        }
    return described
def describe_pipeline_inputs_as_string(graph: networkx.MultiDiGraph):
    """
    Returns a string representation of the input names and types that this pipeline accepts.
    """
    # Build the report line by line and join once at the end.
    lines = ["This pipeline expects the following inputs:\n"]
    for comp, sockets in describe_pipeline_inputs(graph).items():
        if not sockets:
            continue
        lines.append(f"- {comp}:\n")
        lines.extend(f"  - {name}: {_type_name(socket['type'])}\n" for name, socket in sockets.items())
    return "".join(lines)
testbed/deepset-ai__haystack/haystack/core/pipeline/draw.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import base64
|
| 6 |
+
|
| 7 |
+
import networkx # type:ignore
|
| 8 |
+
import requests
|
| 9 |
+
|
| 10 |
+
from haystack import logging
|
| 11 |
+
from haystack.core.errors import PipelineDrawingError
|
| 12 |
+
from haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs
|
| 13 |
+
from haystack.core.type_utils import _type_name
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _prepare_for_drawing(graph: networkx.MultiDiGraph) -> networkx.MultiDiGraph:
    """
    Add some extra nodes to show the inputs and outputs of the pipeline.

    Also adds labels to edges.
    """
    # Attach a human-readable label to every edge, marking optional connections.
    for sender, receiver, edge_key, edge_data in graph.edges(keys=True, data=True):
        suffix = "" if edge_data["mandatory"] else " (opt.)"
        edge_data["label"] = f"{edge_data['from_socket'].name} -> {edge_data['to_socket'].name}{suffix}"
        graph.add_edge(sender, receiver, key=edge_key, **edge_data)

    # Fake "input" node: link it to every mandatory socket without senders.
    graph.add_node("input")
    for component, sockets in find_pipeline_inputs(graph).items():
        for sock in sockets:
            if sock.senders or not sock.is_mandatory:
                continue
            # A socket without senders could receive its value directly when
            # running the Pipeline. We can't know that for sure, so in doubt
            # we draw it as receiving input directly.
            graph.add_edge("input", component, label=sock.name, conn_type=_type_name(sock.type))

    # Fake "output" node: link every pipeline output socket to it.
    graph.add_node("output")
    for component, sockets in find_pipeline_outputs(graph).items():
        for sock in sockets:
            graph.add_edge(component, "output", label=sock.name, conn_type=_type_name(sock.type))

    return graph
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Mermaid arrow fragments: solid lines for mandatory connections, dotted for optional ones.
ARROWTAIL_MANDATORY = "--"
ARROWTAIL_OPTIONAL = "-."
ARROWHEAD_MANDATORY = "-->"
ARROWHEAD_OPTIONAL = ".->"
# Top-level Mermaid document skeleton; `{connections}` is filled with one rendered line per edge.
# Doubled braces escape Mermaid's own `{{ ... }}` syntax from `str.format`.
MERMAID_STYLED_TEMPLATE = """
%%{{ init: {{'theme': 'neutral' }} }}%%

graph TD;

{connections}

classDef component text-align:center;
"""
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _to_mermaid_image(graph: networkx.MultiDiGraph):
    """
    Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). Requires Internet access.

    :param graph: The Pipeline execution graph to render.
    :return: The raw bytes of the rendered PNG image.
    :raises PipelineDrawingError: If mermaid.ink can't be reached or returns an error response.
    """
    # Copy the graph to avoid modifying the original
    graph_styled = _to_mermaid_text(graph.copy())

    # Encode as UTF-8 rather than ASCII: component names and type names may
    # contain non-ASCII characters, which would make .encode("ascii") raise
    # UnicodeEncodeError. mermaid.ink decodes the base64 payload as UTF-8.
    graphbytes = graph_styled.encode("utf-8")
    base64_bytes = base64.b64encode(graphbytes)
    base64_string = base64_bytes.decode("ascii")
    url = f"https://mermaid.ink/img/{base64_string}?type=png"

    logger.debug("Rendering graph at {url}", url=url)
    try:
        resp = requests.get(url, timeout=10)
        if resp.status_code >= 400:
            logger.warning(
                "Failed to draw the pipeline: https://mermaid.ink/img/ returned status {status_code}",
                status_code=resp.status_code,
            )
            logger.info("Exact URL requested: {url}", url=url)
            logger.warning("No pipeline diagram will be saved.")
            resp.raise_for_status()

    except Exception as exc:  # pylint: disable=broad-except
        logger.warning(
            "Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ ({error})", error=exc
        )
        logger.info("Exact URL requested: {url}", url=url)
        logger.warning("No pipeline diagram will be saved.")
        raise PipelineDrawingError(
            "There was an issue with https://mermaid.ink/, see the stacktrace for details."
        ) from exc

    return resp.content
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:
    """
    Converts a Networkx graph into Mermaid syntax.

    The output of this function can be used in the documentation with `mermaid` codeblocks and will be
    automatically rendered.
    """
    # Copy the graph to avoid modifying the original
    graph = _prepare_for_drawing(graph.copy())
    # For each component, render its optional/variadic input sockets as an HTML list
    # (empty string when the component has none).
    sockets = {
        comp: "".join(
            [
                f"<li>{name} ({_type_name(socket.type)})</li>"
                for name, socket in data.get("input_sockets", {}).items()
                if (not socket.is_mandatory and not socket.senders) or socket.is_variadic
            ]
        )
        for comp, data in graph.nodes(data=True)
    }
    optional_inputs = {
        comp: f"<br><br>Optional inputs:<ul style='text-align:left;'>{sockets}</ul>" if sockets else ""
        for comp, sockets in sockets.items()
    }

    # Mermaid node declaration for each real component; the fake "input"/"output"
    # nodes added by _prepare_for_drawing are rendered separately below.
    states = {
        comp: f"{comp}[\"<b>{comp}</b><br><small><i>{type(data['instance']).__name__}{optional_inputs[comp]}</i></small>\"]:::component"  # noqa
        for comp, data in graph.nodes(data=True)
        if comp not in ["input", "output"]
    }

    # Component-to-component edges, styled by whether the connection is mandatory.
    connections_list = []
    for from_comp, to_comp, conn_data in graph.edges(data=True):
        if from_comp != "input" and to_comp != "output":
            arrowtail = ARROWTAIL_MANDATORY if conn_data["mandatory"] else ARROWTAIL_OPTIONAL
            arrowhead = ARROWHEAD_MANDATORY if conn_data["mandatory"] else ARROWHEAD_OPTIONAL
            label = f'"{conn_data["label"]}<br><small><i>{conn_data["conn_type"]}</i></small>"'
            conn_string = f"{states[from_comp]} {arrowtail} {label} {arrowhead} {states[to_comp]}"
            connections_list.append(conn_string)

    # Edges from the fake "input" node and to the fake "output" node,
    # drawn as rhombus endpoints (`i{*}` / `o{*}`).
    input_connections = [
        f"i{{*}}--\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> {states[to_comp]}"
        for _, to_comp, conn_data in graph.out_edges("input", data=True)
    ]
    output_connections = [
        f"{states[from_comp]}--\"{conn_data['label']}<br><small><i>{conn_data['conn_type']}</i></small>\"--> o{{*}}"
        for from_comp, _, conn_data in graph.in_edges("output", data=True)
    ]
    connections = "\n".join(connections_list + input_connections + output_connections)

    graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)
    logger.debug("Mermaid diagram:\n{diagram}", diagram=graph_styled)

    return graph_styled
|
testbed/deepset-ai__haystack/haystack/core/pipeline/pipeline.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple
|
| 7 |
+
from warnings import warn
|
| 8 |
+
|
| 9 |
+
import networkx as nx
|
| 10 |
+
|
| 11 |
+
from haystack import logging, tracing
|
| 12 |
+
from haystack.core.component import Component
|
| 13 |
+
from haystack.core.errors import PipelineMaxComponentRuns, PipelineRuntimeError
|
| 14 |
+
from haystack.core.pipeline.base import (
|
| 15 |
+
_dequeue_component,
|
| 16 |
+
_dequeue_waiting_component,
|
| 17 |
+
_enqueue_component,
|
| 18 |
+
_enqueue_waiting_component,
|
| 19 |
+
)
|
| 20 |
+
from haystack.telemetry import pipeline_running
|
| 21 |
+
|
| 22 |
+
from .base import PipelineBase, _add_missing_input_defaults, _is_lazy_variadic
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Pipeline(PipelineBase):
|
| 28 |
+
"""
|
| 29 |
+
Synchronous version of the orchestration engine.
|
| 30 |
+
|
| 31 |
+
Orchestrates component execution according to the execution graph, one after the other.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
    def _run_component(
        self, name: str, inputs: Dict[str, Any], parent_span: Optional[tracing.Span] = None
    ) -> Dict[str, Any]:
        """
        Runs a Component with the given inputs.

        :param name: Name of the Component as defined in the Pipeline.
        :param inputs: Inputs for the Component.
        :param parent_span: The parent span to use for the newly created span.
            This is to allow tracing to be correctly linked to the pipeline run.
        :raises PipelineRuntimeError: If Component doesn't return a dictionary.
        :return: The output of the Component.
        """
        instance: Component = self.graph.nodes[name]["instance"]

        # Open a tracing span that records the component's identity, the types of the
        # actual inputs for this run, and its declared input/output socket specs.
        with tracing.tracer.trace(
            "haystack.component.run",
            tags={
                "haystack.component.name": name,
                "haystack.component.type": instance.__class__.__name__,
                "haystack.component.input_types": {k: type(v).__name__ for k, v in inputs.items()},
                "haystack.component.input_spec": {
                    key: {
                        "type": (value.type.__name__ if isinstance(value.type, type) else str(value.type)),
                        "senders": value.senders,
                    }
                    for key, value in instance.__haystack_input__._sockets_dict.items()  # type: ignore
                },
                "haystack.component.output_spec": {
                    key: {
                        "type": (value.type.__name__ if isinstance(value.type, type) else str(value.type)),
                        "receivers": value.receivers,
                    }
                    for key, value in instance.__haystack_output__._sockets_dict.items()  # type: ignore
                },
            },
            parent_span=parent_span,
        ) as span:
            # We deepcopy the inputs otherwise we might lose that information
            # when we delete them in case they're sent to other Components
            span.set_content_tag("haystack.component.input", deepcopy(inputs))
            logger.info("Running component {component_name}", component_name=name)
            res: Dict[str, Any] = instance.run(**inputs)
            # Track how many times this component has run, for max-run enforcement elsewhere.
            self.graph.nodes[name]["visits"] += 1

            # After a Component that has variadic inputs is run, we need to reset the variadic inputs that were consumed
            for socket in instance.__haystack_input__._sockets_dict.values():  # type: ignore
                if socket.name not in inputs:
                    continue
                if socket.is_variadic:
                    inputs[socket.name] = []

            if not isinstance(res, Mapping):
                raise PipelineRuntimeError(
                    f"Component '{name}' didn't return a dictionary. "
                    "Components must always return dictionaries: check the documentation."
                )
            span.set_tag("haystack.component.visits", self.graph.nodes[name]["visits"])
            span.set_content_tag("haystack.component.output", res)

            return res
|
| 95 |
+
|
| 96 |
+
    def _run_subgraph(  # noqa: PLR0915
        self,
        cycle: List[str],
        component_name: str,
        components_inputs: Dict[str, Dict[str, Any]],
        include_outputs_from: Optional[Set[str]] = None,
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        Runs a `cycle` in the Pipeline starting from `component_name`.

        This will return once there are no inputs for the Components in `cycle`.

        This is an internal method meant to be used in `Pipeline.run()` only.

        :param cycle:
            List of Components that are part of the cycle being run
        :param component_name:
            Name of the Component that will start execution of the cycle
        :param components_inputs:
            Components inputs, this might include inputs for Components that are not part
            of the cycle but part of the wider Pipeline's graph
        :param include_outputs_from:
            Set of component names whose individual outputs are to be
            included in the cycle's output. In case a Component is executed multiple times
            only the last-produced output is included.
        :returns:
            Outputs of all the Components that are not connected to other Components in `cycle`.
            If `include_outputs_from` is set those Components' outputs will be included.
        :raises PipelineMaxComponentRuns:
            If a Component reaches the maximum number of times it can be run in this Pipeline
        """
        waiting_queue: List[Tuple[str, Component]] = []
        run_queue: List[Tuple[str, Component]] = []

        # Create the run queue starting with the component that needs to run first
        start_index = cycle.index(component_name)
        for node in cycle[start_index:]:
            run_queue.append((node, self.graph.nodes[node]["instance"]))

        include_outputs_from = set() if include_outputs_from is None else include_outputs_from

        # Snapshot of the waiting queue from the previous two iterations,
        # used to detect when no progress is being made (stuck loop).
        before_last_waiting_queue: Optional[Set[str]] = None
        last_waiting_queue: Optional[Set[str]] = None

        subgraph_outputs = {}
        # These are outputs that are sent to other Components but the user explicitly
        # asked to include them in the final output.
        extra_outputs = {}

        # This variable is used to keep track if we still need to run the cycle or not.
        # When a Component doesn't send outputs to another Component
        # that's inside the subgraph, we stop running this subgraph.
        cycle_received_inputs = False

        while not cycle_received_inputs:
            # Here we run the Components
            name, comp = run_queue.pop(0)
            if _is_lazy_variadic(comp) and not all(_is_lazy_variadic(comp) for _, comp in run_queue):
                # We run Components with lazy variadic inputs only if there only Components with
                # lazy variadic inputs left to run
                _enqueue_waiting_component((name, comp), waiting_queue)
                continue

            # As soon as a Component returns only output that is not part of the cycle, we can stop
            if self._component_has_enough_inputs_to_run(name, components_inputs):
                if self.graph.nodes[name]["visits"] > self._max_runs_per_component:
                    msg = f"Maximum run count {self._max_runs_per_component} reached for component '{name}'"
                    raise PipelineMaxComponentRuns(msg)

                res: Dict[str, Any] = self._run_component(name, components_inputs[name])

                # Delete the inputs that were consumed by the Component and are not received from
                # the user or from Components that are part of this cycle
                sockets = list(components_inputs[name].keys())
                for socket_name in sockets:
                    senders = comp.__haystack_input__._sockets_dict[socket_name].senders  # type: ignore
                    if not senders:
                        # We keep inputs that came from the user
                        continue
                    all_senders_in_cycle = all(sender in cycle for sender in senders)
                    if all_senders_in_cycle:
                        # All senders are in the cycle, we can remove the input.
                        # We'll receive it later at a certain point.
                        del components_inputs[name][socket_name]

                if name in include_outputs_from:
                    # Deepcopy the outputs to prevent downstream nodes from modifying them
                    # We don't care about loops - Always store the last output.
                    extra_outputs[name] = deepcopy(res)

                # Reset the waiting for input previous states, we managed to run a component
                before_last_waiting_queue = None
                last_waiting_queue = None

                # Check if a component doesn't send any output to components that are part of the cycle
                final_output_reached = False
                for output_socket in res.keys():
                    for receiver in comp.__haystack_output__._sockets_dict[output_socket].receivers:  # type: ignore
                        if receiver in cycle:
                            final_output_reached = True
                            break
                    if final_output_reached:
                        break

                if not final_output_reached:
                    # We stop only if the Component we just ran doesn't send any output to sockets that
                    # are part of the cycle
                    cycle_received_inputs = True

                # We manage to run this component that was in the waiting list, we can remove it.
                # This happens when a component was put in the waiting list but we reached it from another edge.
                _dequeue_waiting_component((name, comp), waiting_queue)
                for pair in self._find_components_that_will_receive_no_input(name, res, components_inputs):
                    _dequeue_component(pair, run_queue, waiting_queue)

                # Only distribute output to receivers that are inside this cycle.
                receivers = [item for item in self._find_receivers_from(name) if item[0] in cycle]

                res = self._distribute_output(receivers, res, components_inputs, run_queue, waiting_queue)

                # We treat a cycle as a completely independent graph, so we keep track of output
                # that is not sent inside the cycle.
                # This output is going to get distributed to the wider graph after we finish running
                # a cycle.
                # All values that are left at this point go outside the cycle.
                if len(res) > 0:
                    subgraph_outputs[name] = res
            else:
                # This component doesn't have enough inputs so we can't run it yet
                _enqueue_waiting_component((name, comp), waiting_queue)

            if len(run_queue) == 0 and len(waiting_queue) > 0:
                # Check if we're stuck in a loop.
                # It's important to check whether previous waitings are None as it could be that no
                # Component has actually been run yet.
                if (
                    before_last_waiting_queue is not None
                    and last_waiting_queue is not None
                    and before_last_waiting_queue == last_waiting_queue
                ):
                    if self._is_stuck_in_a_loop(waiting_queue):
                        # We're stuck! We can't make any progress.
                        msg = (
                            "Pipeline is stuck running in a loop. Partial outputs will be returned. "
                            "Check the Pipeline graph for possible issues."
                        )
                        warn(RuntimeWarning(msg))
                        break

                    # Not truly stuck yet: force a lazy-variadic or defaulted component to run.
                    (name, comp) = self._find_next_runnable_lazy_variadic_or_default_component(waiting_queue)
                    _add_missing_input_defaults(name, comp, components_inputs)
                    _enqueue_component((name, comp), run_queue, waiting_queue)
                    continue

                # Rotate the progress-detection snapshots before picking the next runnable component.
                before_last_waiting_queue = last_waiting_queue.copy() if last_waiting_queue is not None else None
                last_waiting_queue = {item[0] for item in waiting_queue}

                (name, comp) = self._find_next_runnable_component(components_inputs, waiting_queue)
                _add_missing_input_defaults(name, comp, components_inputs)
                _enqueue_component((name, comp), run_queue, waiting_queue)

        return subgraph_outputs, extra_outputs
|
| 257 |
+
|
| 258 |
+
def run( # noqa: PLR0915, PLR0912
|
| 259 |
+
self, data: Dict[str, Any], include_outputs_from: Optional[Set[str]] = None
|
| 260 |
+
) -> Dict[str, Any]:
|
| 261 |
+
"""
|
| 262 |
+
Runs the Pipeline with given input data.
|
| 263 |
+
|
| 264 |
+
Usage:
|
| 265 |
+
```python
|
| 266 |
+
from haystack import Pipeline, Document
|
| 267 |
+
from haystack.utils import Secret
|
| 268 |
+
from haystack.document_stores.in_memory import InMemoryDocumentStore
|
| 269 |
+
from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
|
| 270 |
+
from haystack.components.generators import OpenAIGenerator
|
| 271 |
+
from haystack.components.builders.answer_builder import AnswerBuilder
|
| 272 |
+
from haystack.components.builders.prompt_builder import PromptBuilder
|
| 273 |
+
|
| 274 |
+
# Write documents to InMemoryDocumentStore
|
| 275 |
+
document_store = InMemoryDocumentStore()
|
| 276 |
+
document_store.write_documents([
|
| 277 |
+
Document(content="My name is Jean and I live in Paris."),
|
| 278 |
+
Document(content="My name is Mark and I live in Berlin."),
|
| 279 |
+
Document(content="My name is Giorgio and I live in Rome.")
|
| 280 |
+
])
|
| 281 |
+
|
| 282 |
+
prompt_template = \"\"\"
|
| 283 |
+
Given these documents, answer the question.
|
| 284 |
+
Documents:
|
| 285 |
+
{% for doc in documents %}
|
| 286 |
+
{{ doc.content }}
|
| 287 |
+
{% endfor %}
|
| 288 |
+
Question: {{question}}
|
| 289 |
+
Answer:
|
| 290 |
+
\"\"\"
|
| 291 |
+
|
| 292 |
+
retriever = InMemoryBM25Retriever(document_store=document_store)
|
| 293 |
+
prompt_builder = PromptBuilder(template=prompt_template)
|
| 294 |
+
llm = OpenAIGenerator(api_key=Secret.from_token(api_key))
|
| 295 |
+
|
| 296 |
+
rag_pipeline = Pipeline()
|
| 297 |
+
rag_pipeline.add_component("retriever", retriever)
|
| 298 |
+
rag_pipeline.add_component("prompt_builder", prompt_builder)
|
| 299 |
+
rag_pipeline.add_component("llm", llm)
|
| 300 |
+
rag_pipeline.connect("retriever", "prompt_builder.documents")
|
| 301 |
+
rag_pipeline.connect("prompt_builder", "llm")
|
| 302 |
+
|
| 303 |
+
# Ask a question
|
| 304 |
+
question = "Who lives in Paris?"
|
| 305 |
+
results = rag_pipeline.run(
|
| 306 |
+
{
|
| 307 |
+
"retriever": {"query": question},
|
| 308 |
+
"prompt_builder": {"question": question},
|
| 309 |
+
}
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
print(results["llm"]["replies"])
|
| 313 |
+
# Jean lives in Paris
|
| 314 |
+
```
|
| 315 |
+
|
| 316 |
+
:param data:
|
| 317 |
+
A dictionary of inputs for the pipeline's components. Each key is a component name
|
| 318 |
+
and its value is a dictionary of that component's input parameters:
|
| 319 |
+
```
|
| 320 |
+
data = {
|
| 321 |
+
"comp1": {"input1": 1, "input2": 2},
|
| 322 |
+
}
|
| 323 |
+
```
|
| 324 |
+
For convenience, this format is also supported when input names are unique:
|
| 325 |
+
```
|
| 326 |
+
data = {
|
| 327 |
+
"input1": 1, "input2": 2,
|
| 328 |
+
}
|
| 329 |
+
```
|
| 330 |
+
:param include_outputs_from:
|
| 331 |
+
Set of component names whose individual outputs are to be
|
| 332 |
+
included in the pipeline's output. For components that are
|
| 333 |
+
invoked multiple times (in a loop), only the last-produced
|
| 334 |
+
output is included.
|
| 335 |
+
:returns:
|
| 336 |
+
A dictionary where each entry corresponds to a component name
|
| 337 |
+
and its output. If `include_outputs_from` is `None`, this dictionary
|
| 338 |
+
will only contain the outputs of leaf components, i.e., components
|
| 339 |
+
without outgoing connections.
|
| 340 |
+
|
| 341 |
+
:raises PipelineRuntimeError:
|
| 342 |
+
If the Pipeline contains cycles with unsupported connections that would cause
|
| 343 |
+
it to get stuck and fail running.
|
| 344 |
+
Or if a Component fails or returns output in an unsupported type.
|
| 345 |
+
:raises PipelineMaxComponentRuns:
|
| 346 |
+
If a Component reaches the maximum number of times it can be run in this Pipeline.
|
| 347 |
+
"""
|
| 348 |
+
pipeline_running(self)
|
| 349 |
+
|
| 350 |
+
# Reset the visits count for each component
|
| 351 |
+
self._init_graph()
|
| 352 |
+
|
| 353 |
+
# TODO: Remove this warmup once we can check reliably whether a component has been warmed up or not
|
| 354 |
+
# As of now it's here to make sure we don't have failing tests that assume warm_up() is called in run()
|
| 355 |
+
self.warm_up()
|
| 356 |
+
|
| 357 |
+
# normalize `data`
|
| 358 |
+
data = self._prepare_component_input_data(data)
|
| 359 |
+
|
| 360 |
+
# Raise if input is malformed in some way
|
| 361 |
+
self._validate_input(data)
|
| 362 |
+
|
| 363 |
+
# Normalize the input data
|
| 364 |
+
components_inputs: Dict[str, Dict[str, Any]] = self._normalize_varidiac_input_data(data)
|
| 365 |
+
|
| 366 |
+
# These variables are used to detect when we're stuck in a loop.
|
| 367 |
+
# Stuck loops can happen when one or more components are waiting for input but
|
| 368 |
+
# no other component is going to run.
|
| 369 |
+
# This can happen when a whole branch of the graph is skipped for example.
|
| 370 |
+
# When we find that two consecutive iterations of the loop where the waiting_queue is the same,
|
| 371 |
+
# we know we're stuck in a loop and we can't make any progress.
|
| 372 |
+
#
|
| 373 |
+
# They track the previous two states of the waiting_queue. So if waiting_queue would n,
|
| 374 |
+
# before_last_waiting_queue would be n-2 and last_waiting_queue would be n-1.
|
| 375 |
+
# When we run a component, we reset both.
|
| 376 |
+
before_last_waiting_queue: Optional[Set[str]] = None
|
| 377 |
+
last_waiting_queue: Optional[Set[str]] = None
|
| 378 |
+
|
| 379 |
+
# The waiting_for_input list is used to keep track of components that are waiting for input.
|
| 380 |
+
waiting_queue: List[Tuple[str, Component]] = []
|
| 381 |
+
|
| 382 |
+
include_outputs_from = set() if include_outputs_from is None else include_outputs_from
|
| 383 |
+
|
| 384 |
+
# This is what we'll return at the end
|
| 385 |
+
final_outputs: Dict[Any, Any] = {}
|
| 386 |
+
|
| 387 |
+
# Break cycles in case there are, this is a noop if no cycle is found.
|
| 388 |
+
# This will raise if a cycle can't be broken.
|
| 389 |
+
graph_without_cycles, components_in_cycles = self._break_supported_cycles_in_graph()
|
| 390 |
+
|
| 391 |
+
run_queue: List[Tuple[str, Component]] = []
|
| 392 |
+
for node in nx.topological_sort(graph_without_cycles):
|
| 393 |
+
run_queue.append((node, self.graph.nodes[node]["instance"]))
|
| 394 |
+
|
| 395 |
+
# Set defaults inputs for those sockets that don't receive input neither from the user
|
| 396 |
+
# nor from other Components.
|
| 397 |
+
# If they have no default nothing is done.
|
| 398 |
+
# This is important to ensure correct order execution, otherwise some variadic
|
| 399 |
+
# Components that receive input from the user might be run before than they should.
|
| 400 |
+
for name, comp in self.graph.nodes(data="instance"):
|
| 401 |
+
if name not in components_inputs:
|
| 402 |
+
components_inputs[name] = {}
|
| 403 |
+
for socket_name, socket in comp.__haystack_input__._sockets_dict.items():
|
| 404 |
+
if socket_name in components_inputs[name]:
|
| 405 |
+
continue
|
| 406 |
+
if not socket.senders:
|
| 407 |
+
value = socket.default_value
|
| 408 |
+
if socket.is_variadic:
|
| 409 |
+
value = [value]
|
| 410 |
+
components_inputs[name][socket_name] = value
|
| 411 |
+
|
| 412 |
+
with tracing.tracer.trace(
|
| 413 |
+
"haystack.pipeline.run",
|
| 414 |
+
tags={
|
| 415 |
+
"haystack.pipeline.input_data": data,
|
| 416 |
+
"haystack.pipeline.output_data": final_outputs,
|
| 417 |
+
"haystack.pipeline.metadata": self.metadata,
|
| 418 |
+
"haystack.pipeline.max_runs_per_component": self._max_runs_per_component,
|
| 419 |
+
},
|
| 420 |
+
) as span:
|
| 421 |
+
# Cache for extra outputs, if enabled.
|
| 422 |
+
extra_outputs: Dict[Any, Any] = {}
|
| 423 |
+
|
| 424 |
+
while len(run_queue) > 0:
|
| 425 |
+
name, comp = run_queue.pop(0)
|
| 426 |
+
|
| 427 |
+
if _is_lazy_variadic(comp) and not all(_is_lazy_variadic(comp) for _, comp in run_queue):
|
| 428 |
+
# We run Components with lazy variadic inputs only if there only Components with
|
| 429 |
+
# lazy variadic inputs left to run
|
| 430 |
+
_enqueue_waiting_component((name, comp), waiting_queue)
|
| 431 |
+
continue
|
| 432 |
+
if self._component_has_enough_inputs_to_run(name, components_inputs) and components_in_cycles.get(
|
| 433 |
+
name, []
|
| 434 |
+
):
|
| 435 |
+
cycles = components_in_cycles.get(name, [])
|
| 436 |
+
|
| 437 |
+
# This component is part of one or more cycles, let's get the first one and run it.
|
| 438 |
+
# We can reliably pick any of the cycles if there are multiple ones, the way cycles
|
| 439 |
+
# are run doesn't make a different whether we pick the first or any of the others a
|
| 440 |
+
# Component is part of.
|
| 441 |
+
subgraph_output, subgraph_extra_output = self._run_subgraph(
|
| 442 |
+
cycles[0], name, components_inputs, include_outputs_from
|
| 443 |
+
)
|
| 444 |
+
|
| 445 |
+
# After a cycle is run the previous run_queue can't be correct anymore cause it's
|
| 446 |
+
# not modified when running the subgraph.
|
| 447 |
+
# So we reset it given the output returned by the subgraph.
|
| 448 |
+
run_queue = []
|
| 449 |
+
|
| 450 |
+
# Reset the waiting for input previous states, we managed to run at least one component
|
| 451 |
+
before_last_waiting_queue = None
|
| 452 |
+
last_waiting_queue = None
|
| 453 |
+
|
| 454 |
+
# Merge the extra outputs
|
| 455 |
+
extra_outputs.update(subgraph_extra_output)
|
| 456 |
+
|
| 457 |
+
for component_name, component_output in subgraph_output.items():
|
| 458 |
+
receivers = self._find_receivers_from(component_name)
|
| 459 |
+
component_output = self._distribute_output(
|
| 460 |
+
receivers, component_output, components_inputs, run_queue, waiting_queue
|
| 461 |
+
)
|
| 462 |
+
|
| 463 |
+
if len(component_output) > 0:
|
| 464 |
+
final_outputs[component_name] = component_output
|
| 465 |
+
|
| 466 |
+
elif self._component_has_enough_inputs_to_run(name, components_inputs):
|
| 467 |
+
if self.graph.nodes[name]["visits"] > self._max_runs_per_component:
|
| 468 |
+
msg = f"Maximum run count {self._max_runs_per_component} reached for component '{name}'"
|
| 469 |
+
raise PipelineMaxComponentRuns(msg)
|
| 470 |
+
|
| 471 |
+
res: Dict[str, Any] = self._run_component(name, components_inputs[name], parent_span=span)
|
| 472 |
+
|
| 473 |
+
# Delete the inputs that were consumed by the Component and are not received from the user
|
| 474 |
+
sockets = list(components_inputs[name].keys())
|
| 475 |
+
for socket_name in sockets:
|
| 476 |
+
senders = comp.__haystack_input__._sockets_dict[socket_name].senders
|
| 477 |
+
if senders:
|
| 478 |
+
# Delete all inputs that are received from other Components
|
| 479 |
+
del components_inputs[name][socket_name]
|
| 480 |
+
# We keep inputs that came from the user
|
| 481 |
+
|
| 482 |
+
if name in include_outputs_from:
|
| 483 |
+
# Deepcopy the outputs to prevent downstream nodes from modifying them
|
| 484 |
+
# We don't care about loops - Always store the last output.
|
| 485 |
+
extra_outputs[name] = deepcopy(res)
|
| 486 |
+
|
| 487 |
+
# Reset the waiting for input previous states, we managed to run a component
|
| 488 |
+
before_last_waiting_queue = None
|
| 489 |
+
last_waiting_queue = None
|
| 490 |
+
|
| 491 |
+
# We manage to run this component that was in the waiting list, we can remove it.
|
| 492 |
+
# This happens when a component was put in the waiting list but we reached it from another edge.
|
| 493 |
+
_dequeue_waiting_component((name, comp), waiting_queue)
|
| 494 |
+
|
| 495 |
+
for pair in self._find_components_that_will_receive_no_input(name, res, components_inputs):
|
| 496 |
+
_dequeue_component(pair, run_queue, waiting_queue)
|
| 497 |
+
receivers = self._find_receivers_from(name)
|
| 498 |
+
res = self._distribute_output(receivers, res, components_inputs, run_queue, waiting_queue)
|
| 499 |
+
|
| 500 |
+
if len(res) > 0:
|
| 501 |
+
final_outputs[name] = res
|
| 502 |
+
else:
|
| 503 |
+
# This component doesn't have enough inputs so we can't run it yet
|
| 504 |
+
_enqueue_waiting_component((name, comp), waiting_queue)
|
| 505 |
+
|
| 506 |
+
if len(run_queue) == 0 and len(waiting_queue) > 0:
|
| 507 |
+
# Check if we're stuck in a loop.
|
| 508 |
+
# It's important to check whether previous waitings are None as it could be that no
|
| 509 |
+
# Component has actually been run yet.
|
| 510 |
+
if (
|
| 511 |
+
before_last_waiting_queue is not None
|
| 512 |
+
and last_waiting_queue is not None
|
| 513 |
+
and before_last_waiting_queue == last_waiting_queue
|
| 514 |
+
):
|
| 515 |
+
if self._is_stuck_in_a_loop(waiting_queue):
|
| 516 |
+
# We're stuck! We can't make any progress.
|
| 517 |
+
msg = (
|
| 518 |
+
"Pipeline is stuck running in a loop. Partial outputs will be returned. "
|
| 519 |
+
"Check the Pipeline graph for possible issues."
|
| 520 |
+
)
|
| 521 |
+
warn(RuntimeWarning(msg))
|
| 522 |
+
break
|
| 523 |
+
|
| 524 |
+
(name, comp) = self._find_next_runnable_lazy_variadic_or_default_component(waiting_queue)
|
| 525 |
+
_add_missing_input_defaults(name, comp, components_inputs)
|
| 526 |
+
_enqueue_component((name, comp), run_queue, waiting_queue)
|
| 527 |
+
continue
|
| 528 |
+
|
| 529 |
+
before_last_waiting_queue = last_waiting_queue.copy() if last_waiting_queue is not None else None
|
| 530 |
+
last_waiting_queue = {item[0] for item in waiting_queue}
|
| 531 |
+
|
| 532 |
+
(name, comp) = self._find_next_runnable_component(components_inputs, waiting_queue)
|
| 533 |
+
_add_missing_input_defaults(name, comp, components_inputs)
|
| 534 |
+
_enqueue_component((name, comp), run_queue, waiting_queue)
|
| 535 |
+
|
| 536 |
+
if len(include_outputs_from) > 0:
|
| 537 |
+
for name, output in extra_outputs.items():
|
| 538 |
+
inner = final_outputs.get(name)
|
| 539 |
+
if inner is None:
|
| 540 |
+
final_outputs[name] = output
|
| 541 |
+
else:
|
| 542 |
+
# Let's not override any keys that are already
|
| 543 |
+
# in the final_outputs as they might be different
|
| 544 |
+
# from what we cached in extra_outputs, e.g. when loops
|
| 545 |
+
# are involved.
|
| 546 |
+
for k, v in output.items():
|
| 547 |
+
if k not in inner:
|
| 548 |
+
inner[k] = v
|
| 549 |
+
|
| 550 |
+
return final_outputs
|
testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/chat_with_website.yaml.jinja2
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
components:
|
| 2 |
+
converter:
|
| 3 |
+
type: haystack.components.converters.html.HTMLToDocument
|
| 4 |
+
init_parameters:
|
| 5 |
+
extraction_kwargs: null
|
| 6 |
+
|
| 7 |
+
fetcher:
|
| 8 |
+
init_parameters:
|
| 9 |
+
raise_on_failure: true
|
| 10 |
+
retry_attempts: 2
|
| 11 |
+
timeout: 3
|
| 12 |
+
user_agents:
|
| 13 |
+
- haystack/LinkContentFetcher/2.0.0b8
|
| 14 |
+
type: haystack.components.fetchers.link_content.LinkContentFetcher
|
| 15 |
+
|
| 16 |
+
llm:
|
| 17 |
+
init_parameters:
|
| 18 |
+
api_base_url: null
|
| 19 |
+
api_key:
|
| 20 |
+
env_vars:
|
| 21 |
+
- OPENAI_API_KEY
|
| 22 |
+
strict: true
|
| 23 |
+
type: env_var
|
| 24 |
+
generation_kwargs: {}
|
| 25 |
+
model: gpt-4o-mini
|
| 26 |
+
streaming_callback: null
|
| 27 |
+
system_prompt: null
|
| 28 |
+
type: haystack.components.generators.openai.OpenAIGenerator
|
| 29 |
+
|
| 30 |
+
prompt:
|
| 31 |
+
init_parameters:
|
| 32 |
+
template: |
|
| 33 |
+
{% raw %}
|
| 34 |
+
"According to the contents of this website:
|
| 35 |
+
{% for document in documents %}
|
| 36 |
+
{{document.content}}
|
| 37 |
+
{% endfor %}
|
| 38 |
+
Answer the given question: {{query}}
|
| 39 |
+
Answer:
|
| 40 |
+
"
|
| 41 |
+
{% endraw %}
|
| 42 |
+
type: haystack.components.builders.prompt_builder.PromptBuilder
|
| 43 |
+
|
| 44 |
+
connections:
|
| 45 |
+
- receiver: converter.sources
|
| 46 |
+
sender: fetcher.streams
|
| 47 |
+
- receiver: prompt.documents
|
| 48 |
+
sender: converter.documents
|
| 49 |
+
- receiver: llm.prompt
|
| 50 |
+
sender: prompt.prompt
|
| 51 |
+
|
| 52 |
+
metadata: {}
|
testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/generative_qa.yaml.jinja2
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
|
| 3 |
+
components:
|
| 4 |
+
generator:
|
| 5 |
+
init_parameters:
|
| 6 |
+
api_key:
|
| 7 |
+
env_vars: [ "OPENAI_API_KEY" ]
|
| 8 |
+
strict: true
|
| 9 |
+
type: "env_var"
|
| 10 |
+
model: "gpt-4o-mini"
|
| 11 |
+
type: "haystack.components.generators.openai.OpenAIGenerator"
|
| 12 |
+
|
| 13 |
+
prompt_builder:
|
| 14 |
+
init_parameters:
|
| 15 |
+
template: "{% raw %}Answer the question {{question}}.\n Answer:\n{% endraw %}"
|
| 16 |
+
type: "haystack.components.builders.prompt_builder.PromptBuilder"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
connections:
|
| 20 |
+
- receiver: generator.prompt
|
| 21 |
+
sender: prompt_builder.prompt
|
| 22 |
+
|
| 23 |
+
metadata:
|
| 24 |
+
{}
|
testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/indexing.yaml.jinja2
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
|
| 3 |
+
components:
|
| 4 |
+
cleaner:
|
| 5 |
+
init_parameters:
|
| 6 |
+
remove_empty_lines: true
|
| 7 |
+
remove_extra_whitespaces: true
|
| 8 |
+
remove_regex: null
|
| 9 |
+
remove_repeated_substrings: false
|
| 10 |
+
remove_substrings: null
|
| 11 |
+
type: haystack.components.preprocessors.document_cleaner.DocumentCleaner
|
| 12 |
+
|
| 13 |
+
converter:
|
| 14 |
+
init_parameters:
|
| 15 |
+
encoding: utf-8
|
| 16 |
+
type: haystack.components.converters.txt.TextFileToDocument
|
| 17 |
+
|
| 18 |
+
embedder:
|
| 19 |
+
init_parameters:
|
| 20 |
+
api_base_url: null
|
| 21 |
+
api_key:
|
| 22 |
+
env_vars:
|
| 23 |
+
- OPENAI_API_KEY
|
| 24 |
+
strict: true
|
| 25 |
+
type: env_var
|
| 26 |
+
batch_size: 32
|
| 27 |
+
dimensions: null
|
| 28 |
+
embedding_separator: '\n'
|
| 29 |
+
meta_fields_to_embed: []
|
| 30 |
+
model: text-embedding-ada-002
|
| 31 |
+
organization: null
|
| 32 |
+
prefix: ''
|
| 33 |
+
progress_bar: true
|
| 34 |
+
suffix: ''
|
| 35 |
+
type: haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder
|
| 36 |
+
|
| 37 |
+
splitter:
|
| 38 |
+
init_parameters:
|
| 39 |
+
split_by: word
|
| 40 |
+
split_length: 200
|
| 41 |
+
split_overlap: 0
|
| 42 |
+
type: haystack.components.preprocessors.document_splitter.DocumentSplitter
|
| 43 |
+
|
| 44 |
+
writer:
|
| 45 |
+
init_parameters:
|
| 46 |
+
document_store:
|
| 47 |
+
init_parameters:
|
| 48 |
+
bm25_tokenization_regex: (?u)\b\w\w+\b
|
| 49 |
+
bm25_algorithm: BM25L
|
| 50 |
+
bm25_parameters: {}
|
| 51 |
+
embedding_similarity_function: dot_product
|
| 52 |
+
index: documents
|
| 53 |
+
type: haystack.document_stores.in_memory.document_store.InMemoryDocumentStore
|
| 54 |
+
policy: NONE
|
| 55 |
+
type: haystack.components.writers.document_writer.DocumentWriter
|
| 56 |
+
|
| 57 |
+
connections:
|
| 58 |
+
- receiver: cleaner.documents
|
| 59 |
+
sender: converter.documents
|
| 60 |
+
- receiver: splitter.documents
|
| 61 |
+
sender: cleaner.documents
|
| 62 |
+
- receiver: embedder.documents
|
| 63 |
+
sender: splitter.documents
|
| 64 |
+
- receiver: writer.documents
|
| 65 |
+
sender: embedder.documents
|
| 66 |
+
|
| 67 |
+
metadata: {}
|
testbed/deepset-ai__haystack/haystack/core/pipeline/predefined/rag.yaml.jinja2
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
|
| 3 |
+
components:
|
| 4 |
+
llm:
|
| 5 |
+
init_parameters:
|
| 6 |
+
api_base_url: null
|
| 7 |
+
api_key:
|
| 8 |
+
env_vars:
|
| 9 |
+
- OPENAI_API_KEY
|
| 10 |
+
strict: true
|
| 11 |
+
type: env_var
|
| 12 |
+
generation_kwargs: {}
|
| 13 |
+
model: gpt-4o-mini
|
| 14 |
+
streaming_callback: null
|
| 15 |
+
system_prompt: null
|
| 16 |
+
type: haystack.components.generators.openai.OpenAIGenerator
|
| 17 |
+
|
| 18 |
+
prompt_builder:
|
| 19 |
+
init_parameters:
|
| 20 |
+
template: |
|
| 21 |
+
{% raw %}
|
| 22 |
+
"Given these documents, answer the question.
|
| 23 |
+
Documents:
|
| 24 |
+
{% for doc in documents %}\
|
| 25 |
+
{{ doc.content }}
|
| 26 |
+
{% endfor %}
|
| 27 |
+
Question: {{query}}
|
| 28 |
+
|
| 29 |
+
Answer:"
|
| 30 |
+
{% endraw %}
|
| 31 |
+
type: haystack.components.builders.prompt_builder.PromptBuilder
|
| 32 |
+
|
| 33 |
+
retriever:
|
| 34 |
+
init_parameters:
|
| 35 |
+
document_store:
|
| 36 |
+
init_parameters:
|
| 37 |
+
bm25_tokenization_regex: (?u)\b\w\w+\b
|
| 38 |
+
bm25_algorithm: BM25L
|
| 39 |
+
bm25_parameters: {}
|
| 40 |
+
embedding_similarity_function: dot_product
|
| 41 |
+
index: documents
|
| 42 |
+
type: haystack.document_stores.in_memory.document_store.InMemoryDocumentStore
|
| 43 |
+
filters: null
|
| 44 |
+
top_k: 10
|
| 45 |
+
type: haystack.components.retrievers.in_memory.embedding_retriever.InMemoryEmbeddingRetriever
|
| 46 |
+
|
| 47 |
+
text_embedder:
|
| 48 |
+
init_parameters:
|
| 49 |
+
api_base_url: null
|
| 50 |
+
api_key:
|
| 51 |
+
env_vars:
|
| 52 |
+
- OPENAI_API_KEY
|
| 53 |
+
strict: true
|
| 54 |
+
type: env_var
|
| 55 |
+
dimensions: null
|
| 56 |
+
model: text-embedding-ada-002
|
| 57 |
+
organization: null
|
| 58 |
+
prefix: ''
|
| 59 |
+
suffix: ''
|
| 60 |
+
type: haystack.components.embedders.openai_text_embedder.OpenAITextEmbedder
|
| 61 |
+
|
| 62 |
+
connections:
|
| 63 |
+
- receiver: retriever.query_embedding
|
| 64 |
+
sender: text_embedder.embedding
|
| 65 |
+
- receiver: prompt_builder.documents
|
| 66 |
+
sender: retriever.documents
|
| 67 |
+
- receiver: llm.prompt
|
| 68 |
+
sender: prompt_builder.prompt
|
| 69 |
+
|
| 70 |
+
metadata: {}
|
testbed/deepset-ai__haystack/haystack/core/pipeline/template.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from enum import Enum
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any, Dict, Optional, Union
|
| 8 |
+
|
| 9 |
+
from jinja2 import PackageLoader, TemplateSyntaxError, meta
|
| 10 |
+
from jinja2.sandbox import SandboxedEnvironment
|
| 11 |
+
|
| 12 |
+
TEMPLATE_FILE_EXTENSION = ".yaml.jinja2"
|
| 13 |
+
TEMPLATE_HOME_DIR = Path(__file__).resolve().parent / "predefined"
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class PredefinedPipeline(Enum):
    """
    Enumeration of predefined pipeline templates that can be used to create a `PipelineTemplate`.

    Each member's value is the stem of a template file shipped with Haystack: the file
    `<value>.yaml.jinja2` located in the `predefined/` directory next to this module
    (see `TEMPLATE_HOME_DIR` and `TEMPLATE_FILE_EXTENSION`, used by
    `PipelineTemplate.from_predefined` to resolve the path).
    """

    # Maintain 1-to-1 mapping between the enum name and the template file name in templates directory
    GENERATIVE_QA = "generative_qa"
    RAG = "rag"
    INDEXING = "indexing"
    CHAT_WITH_WEBSITE = "chat_with_website"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class PipelineTemplate:
    """
    The PipelineTemplate enables the creation of flexible and configurable pipelines.

    The PipelineTemplate class enables the straightforward creation of flexible and configurable pipelines using
    Jinja2 templated YAML files. Specifically designed to simplify the setup of complex data processing pipelines for
    a range of NLP tasks—including question answering, retriever augmented generation (RAG), document indexing, among
    others - PipelineTemplate empowers users to dynamically generate pipeline configurations from templates and
    customize components as necessary. Its design philosophy centers on providing an accessible, yet powerful, tool
    for constructing pipelines that accommodate both common use cases and specialized requirements with ease.

    Examples of usage:

    - **Default Build**: Instantiating a pipeline with default settings for a "question answering" (qa) task.
    ```python
    from haystack.templates import PipelineTemplate, PredefinedPipeline

    # Create a pipeline with default components for an extractive QA task
    pipe = PipelineTemplate.from_predefined(PredefinedPipeline.GENERATIVE_QA).build()
    print(pipe.run(data={"question": "What's the capital of Bosnia and Herzegovina? Be brief"}))
    ```

    - **Customizing for Specific Tasks**: Building a pipeline for document indexing with specific components tailored
    to the task.
    ```python
    from haystack.components.embedders import SentenceTransformersDocumentEmbedder
    from haystack.templates import PipelineTemplate, PredefinedPipeline

    # Customize the pipeline for document indexing with specific components, include PDF file converter
    pt = PipelineTemplate.from_predefined(PredefinedPipeline.INDEXING)
    pipe = pt.build(template_params={"use_pdf_file_converter": True})

    result = pipe.run(data={"sources": ["some_text_file.txt", "another_pdf_file.pdf"]})
    print(result)
    ```

    The `PipelineTemplate` is designed to offer both ease of use for common pipeline configurations and the
    flexibility to customize and extend pipelines as required by advanced users and specific use cases.
    """

    def __init__(self, template_content: str):
        """
        Initialize a PipelineTemplate.

        Besides calling the constructor directly, a set of utility methods is provided to conveniently create an
        instance of `PipelineTemplate` from different sources. See `from_string`, `from_file`, `from_predefined`
        and `from_url`.

        :param template_content: The raw template source to use in the template.
        :raises ValueError: If `template_content` is not valid Jinja2 syntax.
        """
        # A sandboxed environment prevents template expressions from reaching unsafe
        # attributes/methods of the objects passed in as template parameters.
        env = SandboxedEnvironment(
            loader=PackageLoader("haystack.core.pipeline", "predefined"), trim_blocks=True, lstrip_blocks=True
        )
        try:
            self._template = env.from_string(template_content)
        except TemplateSyntaxError as e:
            raise ValueError(f"Invalid pipeline template: {e.message}") from e

        # Store the list of undefined variables in the template. Components' names will be part of this list
        self.template_variables = meta.find_undeclared_variables(env.parse(template_content))
        self._template_content = template_content

    def render(self, template_params: Optional[Dict[str, Any]] = None) -> str:
        """
        Renders the template with the given parameters.

        :param template_params: An optional dictionary of parameters to use when rendering the pipeline template.

        :returns: The rendered pipeline definition (YAML) as a string.
        """
        template_params = template_params or {}
        return self._template.render(**template_params)

    @classmethod
    def from_file(cls, file_path: Union[Path, str]) -> "PipelineTemplate":
        """
        Create a PipelineTemplate from a file.

        :param file_path: The path to the file containing the template. Must contain valid Jinja2 syntax.
        :returns: An instance of `PipelineTemplate`.
        """
        # Templates are shipped as UTF-8 text; read them explicitly as such rather than
        # relying on the platform's locale-dependent default encoding.
        with open(file_path, "r", encoding="utf-8") as file:
            return cls(file.read())

    @classmethod
    def from_predefined(cls, predefined_pipeline: PredefinedPipeline) -> "PipelineTemplate":
        """
        Create a PipelineTemplate from a predefined template.

        See `PredefinedPipeline` for available options.

        :param predefined_pipeline: The predefined pipeline to use.
        :returns: An instance of `PipelineTemplate`.
        """
        template_path = f"{TEMPLATE_HOME_DIR}/{predefined_pipeline.value}{TEMPLATE_FILE_EXTENSION}"
        return cls.from_file(template_path)

    @property
    def template_content(self) -> str:
        """
        Returns the raw template string as a read-only property.
        """
        return self._template_content
|
testbed/deepset-ai__haystack/haystack/core/pipeline/utils.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Optional, Tuple
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def parse_connect_string(connection: str) -> Tuple[str, Optional[str]]:
    """
    Returns component-connection pairs from a connect_to/from string.

    :param connection:
        The connection string.
    :returns:
        A tuple containing the component name and the connection name.
    """
    # Split on the first dot only: everything after it (including further dots)
    # belongs to the connection/socket part.
    component, separator, socket = connection.partition(".")
    if separator:
        return component, socket
    # No dot present: the whole string is a component name with no explicit socket.
    return connection, None
|
testbed/deepset-ai__haystack/haystack/core/serialization.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
import inspect
|
| 6 |
+
from collections.abc import Callable
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from importlib import import_module
|
| 9 |
+
from typing import Any, Dict, Iterable, Optional, Type
|
| 10 |
+
|
| 11 |
+
from haystack.core.component.component import _hook_component_init, logger
|
| 12 |
+
from haystack.core.errors import DeserializationError, SerializationError
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@dataclass(frozen=True)
class DeserializationCallbacks:
    """
    Callback functions that are invoked in specific stages of the pipeline deserialization process.

    :param component_pre_init:
        Invoked just before a component instance is
        initialized. Receives the following inputs:
        `component_name` (`str`), `component_class` (`Type`), `init_params` (`Dict[str, Any]`).

        The callback is allowed to modify the `init_params`
        dictionary, which contains all the parameters that
        are passed to the component's constructor.
    """

    # Called as component_pre_init(component_name, component_class, init_params);
    # `component_from_dict` wraps it and supplies the component's name as the first argument.
    component_pre_init: Optional[Callable] = None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:
    """
    Converts a component instance into a dictionary.

    If a `to_dict` method is present in the component instance, that will be used instead of the default method.

    :param obj:
        The component to be serialized.
    :param name:
        The name of the component.
    :returns:
        A dictionary representation of the component.

    :raises SerializationError:
        If the values of the init parameters can't be determined.
        If a non-basic Python type is used in the serialized data.
    """
    if hasattr(obj, "to_dict"):
        # Custom serialization takes precedence over the generic one.
        serialized = obj.to_dict()
    else:
        # Generic path: reconstruct the init parameters by mirroring the
        # constructor's signature onto same-named instance attributes.
        collected_params: Dict[str, Any] = {}
        for arg_name, arg in inspect.signature(obj.__init__).parameters.items():
            # Ignore `args` and `kwargs`, used by the default constructor
            if arg_name in ("args", "kwargs"):
                continue
            try:
                # This only works if the Component constructor assigns the init
                # parameter to an instance variable or property with the same name
                collected_params[arg_name] = getattr(obj, arg_name)
            except AttributeError as e:
                # If the parameter doesn't have a default value, raise an error
                if arg.default is arg.empty:
                    raise SerializationError(
                        f"Cannot determine the value of the init parameter '{arg_name}' "
                        f"for the class {obj.__class__.__name__}."
                        f"You can fix this error by assigning 'self.{arg_name} = {arg_name}' or adding a "
                        f"custom serialization method 'to_dict' to the class."
                    ) from e
                # In case the init parameter was not assigned, we use the default value
                collected_params[arg_name] = arg.default

        serialized = default_to_dict(obj, **collected_params)

    _validate_component_to_dict_output(obj, name, serialized)
    return serialized
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def _validate_component_to_dict_output(component: Any, name: str, data: Dict[str, Any]) -> None:
|
| 83 |
+
# Ensure that only basic Python types are used in the serde data.
|
| 84 |
+
def is_allowed_type(obj: Any) -> bool:
|
| 85 |
+
return isinstance(obj, (str, int, float, bool, list, dict, set, tuple, type(None)))
|
| 86 |
+
|
| 87 |
+
def check_iterable(l: Iterable[Any]):
|
| 88 |
+
for v in l:
|
| 89 |
+
if not is_allowed_type(v):
|
| 90 |
+
raise SerializationError(
|
| 91 |
+
f"Component '{name}' of type '{type(component).__name__}' has an unsupported value "
|
| 92 |
+
f"of type '{type(v).__name__}' in the serialized data."
|
| 93 |
+
)
|
| 94 |
+
if isinstance(v, (list, set, tuple)):
|
| 95 |
+
check_iterable(v)
|
| 96 |
+
elif isinstance(v, dict):
|
| 97 |
+
check_dict(v)
|
| 98 |
+
|
| 99 |
+
def check_dict(d: Dict[str, Any]):
|
| 100 |
+
if any(not isinstance(k, str) for k in data.keys()):
|
| 101 |
+
raise SerializationError(
|
| 102 |
+
f"Component '{name}' of type '{type(component).__name__}' has a non-string key in the serialized data."
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
for k, v in d.items():
|
| 106 |
+
if not is_allowed_type(v):
|
| 107 |
+
raise SerializationError(
|
| 108 |
+
f"Component '{name}' of type '{type(component).__name__}' has an unsupported value "
|
| 109 |
+
f"of type '{type(v).__name__}' in the serialized data under key '{k}'."
|
| 110 |
+
)
|
| 111 |
+
if isinstance(v, (list, set, tuple)):
|
| 112 |
+
check_iterable(v)
|
| 113 |
+
elif isinstance(v, dict):
|
| 114 |
+
check_dict(v)
|
| 115 |
+
|
| 116 |
+
check_dict(data)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def generate_qualified_class_name(cls: Type[object]) -> str:
    """
    Generates a qualified class name for a class.

    :param cls:
        The class whose qualified name is to be generated.
    :returns:
        The qualified name of the class.
    """
    # Dotted path "<module>.<class>" — the format expected by the "type" field
    # in serialized component data.
    return ".".join((cls.__module__, cls.__name__))
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def component_from_dict(
    cls: Type[object], data: Dict[str, Any], name: str, callbacks: Optional[DeserializationCallbacks] = None
) -> Any:
    """
    Creates a component instance from a dictionary.

    If a `from_dict` method is present in the component class, that will be used instead of the default method.

    :param cls:
        The class to be used for deserialization.
    :param data:
        The serialized data.
    :param name:
        The name of the component.
    :param callbacks:
        Callbacks to invoke during deserialization.
    :returns:
        The deserialized component.
    """

    def wrapped_pre_init(component_cls, init_params):
        # Forward to the user-provided callback, prepending this component's name.
        assert callbacks is not None
        assert callbacks.component_pre_init is not None
        callbacks.component_pre_init(name, component_cls, init_params)

    def deserialize():
        # Custom deserialization takes precedence over the generic one.
        if hasattr(cls, "from_dict"):
            return cls.from_dict(data)
        return default_from_dict(cls, data)

    if callbacks is not None and callbacks.component_pre_init is not None:
        # Hook the constructor so the callback can inspect/modify init params.
        with _hook_component_init(wrapped_pre_init):
            return deserialize()
    return deserialize()
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:
    """
    Serialize an object to a dictionary.

    Mostly intended for components, but works for any object. The keyword
    arguments must be the parameters expected by the object's `__init__`:
    they are stored verbatim and replayed when the object is re-created via
    `from_dict`. Leaving one out can break deserialization or change the
    reconstructed object's behavior.

    An example usage:

    ```python
    class MyClass:
        def __init__(self, my_param: int = 10):
            self.my_param = my_param

        def to_dict(self):
            return default_to_dict(self, my_param=self.my_param)


    obj = MyClass(my_param=1000)
    data = obj.to_dict()
    assert data == {
        "type": "MyClass",
        "init_parameters": {
            "my_param": 1000,
        },
    }
    ```

    :param obj:
        The object to be serialized.
    :param init_parameters:
        The parameters used to create a new instance of the class.
    :returns:
        A dictionary representation of the instance.
    """
    return {
        "type": generate_qualified_class_name(type(obj)),
        "init_parameters": init_parameters,
    }
def default_from_dict(cls: Type[object], data: Dict[str, Any]) -> Any:
    """
    Deserialize a dictionary into an object.

    Mostly intended for components, but works for any object.

    The `type` field of `data` must be present and match the qualified name
    of `cls`, otherwise a `DeserializationError` is raised. The optional
    `init_parameters` field supplies the keyword arguments for `cls`.

    :param cls:
        The class to be used for deserialization.
    :param data:
        The serialized data.
    :returns:
        The deserialized object.

    :raises DeserializationError:
        If the `type` field in `data` is missing or it doesn't match the type of `cls`.
    """
    if "type" not in data:
        raise DeserializationError("Missing 'type' in serialization data")
    if data["type"] != generate_qualified_class_name(cls):
        raise DeserializationError(f"Class '{data['type']}' can't be deserialized as '{cls.__name__}'")
    init_params = data.get("init_parameters", {})
    return cls(**init_params)
def import_class_by_name(fully_qualified_name: str) -> Type[object]:
    """
    Import (load) a class object from its fully qualified name.

    The name is split into a module path and a class name; the module is
    imported dynamically and the class attribute is looked up on it.

    :param fully_qualified_name: the fully qualified class name as a string
    :returns: the class object.
    :raises ImportError: If the class cannot be imported or found.
    """
    try:
        module_path, class_name = fully_qualified_name.rsplit(".", 1)
        logger.debug(f"Attempting to import class '{class_name}' from module '{module_path}'")
        target_module = import_module(module_path)
        loaded_class = getattr(target_module, class_name)
    except (ImportError, AttributeError) as error:
        logger.error(f"Failed to import class '{fully_qualified_name}'")
        raise ImportError(f"Could not import class '{fully_qualified_name}'") from error
    return loaded_class
|
testbed/deepset-ai__haystack/haystack/core/type_utils.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from typing import Any, Union, get_args, get_origin
|
| 6 |
+
|
| 7 |
+
from haystack import logging
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _is_optional(type_: type) -> bool:
|
| 13 |
+
"""
|
| 14 |
+
Utility method that returns whether a type is Optional.
|
| 15 |
+
"""
|
| 16 |
+
return get_origin(type_) is Union and type(None) in get_args(type_)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements
|
| 20 |
+
"""
|
| 21 |
+
Checks whether the source type is equal or a subtype of the destination type. Used to validate pipeline connections.
|
| 22 |
+
|
| 23 |
+
Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of
|
| 24 |
+
typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well
|
| 25 |
+
with "bare" types, so `List` is treated differently from `List[Any]`, even though they should be the same.
|
| 26 |
+
|
| 27 |
+
Consider simplifying the typing of your components if you observe unexpected errors during component connection.
|
| 28 |
+
"""
|
| 29 |
+
if sender == receiver or receiver is Any:
|
| 30 |
+
return True
|
| 31 |
+
|
| 32 |
+
if sender is Any:
|
| 33 |
+
return False
|
| 34 |
+
|
| 35 |
+
try:
|
| 36 |
+
if issubclass(sender, receiver):
|
| 37 |
+
return True
|
| 38 |
+
except TypeError: # typing classes can't be used with issubclass, so we deal with them below
|
| 39 |
+
pass
|
| 40 |
+
|
| 41 |
+
sender_origin = get_origin(sender)
|
| 42 |
+
receiver_origin = get_origin(receiver)
|
| 43 |
+
|
| 44 |
+
if sender_origin is not Union and receiver_origin is Union:
|
| 45 |
+
return any(_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))
|
| 46 |
+
|
| 47 |
+
if not sender_origin or not receiver_origin or sender_origin != receiver_origin:
|
| 48 |
+
return False
|
| 49 |
+
|
| 50 |
+
sender_args = get_args(sender)
|
| 51 |
+
receiver_args = get_args(receiver)
|
| 52 |
+
if len(sender_args) > len(receiver_args):
|
| 53 |
+
return False
|
| 54 |
+
|
| 55 |
+
return all(_types_are_compatible(*args) for args in zip(sender_args, receiver_args))
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _type_name(type_):
|
| 59 |
+
"""
|
| 60 |
+
Util methods to get a nice readable representation of a type.
|
| 61 |
+
|
| 62 |
+
Handles Optional and Literal in a special way to make it more readable.
|
| 63 |
+
"""
|
| 64 |
+
# Literal args are strings, so we wrap them in quotes to make it clear
|
| 65 |
+
if isinstance(type_, str):
|
| 66 |
+
return f"'{type_}'"
|
| 67 |
+
|
| 68 |
+
name = getattr(type_, "__name__", str(type_))
|
| 69 |
+
|
| 70 |
+
if name.startswith("typing."):
|
| 71 |
+
name = name[7:]
|
| 72 |
+
if "[" in name:
|
| 73 |
+
name = name.split("[")[0]
|
| 74 |
+
args = get_args(type_)
|
| 75 |
+
if name == "Union" and type(None) in args and len(args) == 2:
|
| 76 |
+
# Optional is technically a Union of type and None
|
| 77 |
+
# but we want to display it as Optional
|
| 78 |
+
name = "Optional"
|
| 79 |
+
|
| 80 |
+
if args:
|
| 81 |
+
args = ", ".join([_type_name(a) for a in args if a is not type(None)])
|
| 82 |
+
return f"{name}[{args}]"
|
| 83 |
+
|
| 84 |
+
return f"{name}"
|