diff --git a/testbed/deepset-ai__haystack/.gitignore b/testbed/deepset-ai__haystack/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..063a0e7661b2b62b4bf2997a51925422733e4695
--- /dev/null
+++ b/testbed/deepset-ai__haystack/.gitignore
@@ -0,0 +1,162 @@
+# Local run files
+qa.db
+**/qa.db
+**/*qa*.db
+**/test-reports
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# documentation
+docs/pydoc/temp/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# PyCharm
+.idea
+
+# VSCode
+.vscode
+
+# haystack files
+haystack/document_store/qa.db
+data
+**/mlruns/**
+src
+models
+saved_models
+*_build
+rest_api/file-upload/*
+**/feedback_squad_direct.json
+haystack/json-schemas
+.haystack_debug
+
+.DS_Store
+
+# http cache (requests-cache)
+**/http_cache.sqlite
+
+# ruff
+.ruff_cache
diff --git a/testbed/deepset-ai__haystack/.pre-commit-config.yaml b/testbed/deepset-ai__haystack/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..483ed06b0885afcb549eca0bda55b899915c2ead
--- /dev/null
+++ b/testbed/deepset-ai__haystack/.pre-commit-config.yaml
@@ -0,0 +1,37 @@
+fail_fast: true
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.6.0
+ hooks:
+ - id: check-ast # checks Python syntax
+ - id: check-json # checks JSON syntax
+ - id: check-merge-conflict # checks for no merge conflict strings
+      - id: check-shebang-scripts-are-executable # checks scripts with shebangs have executable permissions
+ - id: check-toml # checks TOML syntax
+ - id: check-yaml # checks YAML syntax
+ - id: end-of-file-fixer # checks there is a newline at the end of the file
+ - id: mixed-line-ending # normalizes line endings
+ - id: no-commit-to-branch # prevents committing to main
+ - id: trailing-whitespace # trims trailing whitespace
+ args: [--markdown-linebreak-ext=md]
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.5.0
+ hooks:
+ - id: ruff
+ - id: ruff-format
+
+ - repo: https://github.com/codespell-project/codespell
+ rev: v2.3.0
+ hooks:
+ - id: codespell
+ args: ["--toml", "pyproject.toml"]
+ additional_dependencies:
+ - tomli
+
+ - repo: https://github.com/rhysd/actionlint
+ rev: v1.7.1
+ hooks:
+ - id: actionlint-docker
+ args: ["-ignore", "SC2102"]
diff --git a/testbed/deepset-ai__haystack/CITATION.cff b/testbed/deepset-ai__haystack/CITATION.cff
new file mode 100644
index 0000000000000000000000000000000000000000..122468655702f8129e99863a7e8bd0be29680ae9
--- /dev/null
+++ b/testbed/deepset-ai__haystack/CITATION.cff
@@ -0,0 +1,30 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it using these metadata."
+title: "Haystack: the end-to-end NLP framework for pragmatic builders"
+date-released: 2019-11-14
+url: "https://github.com/deepset-ai/haystack"
+authors:
+- family-names: Pietsch
+ given-names: Malte
+- family-names: Möller
+ given-names: Timo
+- family-names: Kostic
+ given-names: Bogdan
+- family-names: Risch
+ given-names: Julian
+- family-names: Pippi
+ given-names: Massimiliano
+- family-names: Jobanputra
+ given-names: Mayank
+- family-names: Zanzottera
+ given-names: Sara
+- family-names: Cerza
+ given-names: Silvano
+- family-names: Blagojevic
+ given-names: Vladimir
+- family-names: Stadelmann
+ given-names: Thomas
+- family-names: Soni
+ given-names: Tanay
+- family-names: Lee
+ given-names: Sebastian
diff --git a/testbed/deepset-ai__haystack/CONTRIBUTING.md b/testbed/deepset-ai__haystack/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a2e23b27e23c14b8bcfe91747a1c383662a16f3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/CONTRIBUTING.md
@@ -0,0 +1,382 @@
+# Contributing to Haystack
+
+First off, thanks for taking the time to contribute! :blue_heart:
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents)
+for different ways to help and details about how this project handles them. Please make sure to read
+the relevant section before making your contribution. It will make it a lot easier for us maintainers
+and smooth out the experience for all involved. The community looks forward to your contributions!
+
+> [!TIP]
+> If you like Haystack but just don't have time to contribute, that's fine. There are other easy ways to support the
+> project and show your appreciation, which we would also be very happy about:
+> - Star this repository
+> - Tweet about it
+> - Mention Haystack at local meetups and tell your friends/colleagues
+
+**Table of Contents**
+
+- [Contributing to Haystack](#contributing-to-haystack)
+ - [Code of Conduct](#code-of-conduct)
+ - [I Have a Question](#i-have-a-question)
+ - [Reporting Bugs](#reporting-bugs)
+ - [Before Submitting a Bug Report](#before-submitting-a-bug-report)
+ - [How Do I Submit a Good Bug Report?](#how-do-i-submit-a-good-bug-report)
+ - [Suggesting Enhancements](#suggesting-enhancements)
+ - [Before Submitting an Enhancement](#before-submitting-an-enhancement)
+ - [How Do I Submit a Good Enhancement Suggestion?](#how-do-i-submit-a-good-enhancement-suggestion)
+ - [Contribute code](#contribute-code)
+ - [Where to start](#where-to-start)
+ - [Setting up your development environment](#setting-up-your-development-environment)
+ - [Clone the git repository](#clone-the-git-repository)
+ - [Run the tests locally](#run-the-tests-locally)
+ - [Requirements for Pull Requests](#requirements-for-pull-requests)
+ - [Release notes](#release-notes)
+ - [CI (Continuous Integration)](#ci-continuous-integration)
+ - [Working from GitHub forks](#working-from-github-forks)
+ - [Writing tests](#writing-tests)
+ - [Unit test](#unit-test)
+ - [Integration test](#integration-test)
+ - [End to End (e2e) test](#end-to-end-e2e-test)
+ - [Contributor Licence Agreement (CLA)](#contributor-licence-agreement-cla)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by our [Code of Conduct](code_of_conduct.txt).
+By participating, you are expected to uphold this code. Please report unacceptable behavior to haystack@deepset.ai.
+
+## I Have a Question
+
+> [!TIP]
+> If you want to ask a question, we assume that you have read the available [Documentation](https://docs.haystack.deepset.ai/docs/intro).
+
+Before you ask a question, it is best to search for existing [Issues](/issues) that might help you. In case you have
+found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to
+search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, you can use one of our
+[Community Channels](https://haystack.deepset.ai/community), Discord, in particular, is often very helpful.
+
+## Reporting Bugs
+
+### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to
+investigate carefully, collect information, and describe the issue in detail in your report. Please complete the
+following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side, for example using incompatible versions.
+ Make sure that you have read the [documentation](https://docs.haystack.deepset.ai/docs/intro). If you are looking
+ for support, you might want to check [this section](#i-have-a-question).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there
+ is not already a bug report existing for your bug or error in the [bug tracker](/issues).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have
+ discussed the issue.
+- Collect information about the bug:
+ - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+ - Version of Haystack and the integrations you're using
+ - Possibly your input and the output
+ - If you can reliably reproduce the issue, a snippet of code we can use
+
+### How Do I Submit a Good Bug Report?
+
+> [!IMPORTANT]
+> You must never report security-related issues, vulnerabilities, or bugs, including sensitive information to the issue
+> tracker, or elsewhere in public. Instead, sensitive bugs must be reported using [this link](https://github.com/deepset-ai/haystack/security/advisories/new).
+>
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue of type Bug Report](/issues/new?assignees=&labels=bug&projects=&template=bug_report.md&title=).
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to
+ recreate the issue on their own. This usually includes your code. For good bug reports, you should isolate the problem
+ and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no
+ obvious way to reproduce the issue, the team will ask you for those steps.
+- If the team is able to reproduce the issue, the issue will be scheduled for a fix or left to be
+  [implemented by someone](#where-to-start).
+
+## Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion, including new integrations and improvements
+to existing ones. Following these guidelines will help maintainers and the community to understand your suggestion and
+find related suggestions.
+
+### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation](https://docs.haystack.deepset.ai/docs/intro) carefully and find out if the functionality
+ is already covered, maybe by an individual configuration.
+- Perform a [search](/issues) to see if the enhancement has already been suggested. If it has, add a comment to the
+ existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to
+ convince the project's developers of the merits of this feature. Keep in mind that we want features that will be
+ useful to the majority of our users and not just a small subset. If you're just targeting a minority of users,
+ consider writing and distributing the integration on your own.
+
+### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as GitHub issues of type [Feature request for existing integrations](/issues/new?assignees=&labels=feature+request&projects=&template=feature-request-for-existing-integrations.md&title=).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Fill in the issue following the template.
+
+## Contribute code
+
+> [!IMPORTANT]
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the
+> necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Where to start
+
+If this is your first contribution, a good starting point is looking for an open issue that's marked with the label
+["good first issue"](https://github.com/deepset-ai/haystack/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
+The core contributors periodically mark certain issues as good for first-time contributors. Those issues are usually
+limited in scope, easily fixable and low priority, so there is absolutely no reason why you should not try fixing them,
+it's a good excuse to start looking into the project and a safe space for experimenting with failure: if you don't get the
+grasp of something, pick another one! Once you become comfortable contributing to Haystack, you can have a look at the
+list of issues marked as [contributions wanted](https://github.com/orgs/deepset-ai/projects/14/views/1) to look for your
+next contribution!
+
+### Setting up your development environment
+
+Haystack makes heavy use of [Hatch](https://hatch.pypa.io/latest/), a Python project manager that we use to set up the
+virtual environments, build the project, and publish packages. As you can imagine, the first step towards becoming a
+Haystack contributor is installing Hatch. There are a variety of installation methods depending on your operating system
+platform, version, and personal taste: please have a look at [this page](https://hatch.pypa.io/latest/install/#installation)
+and keep reading once you can run from your terminal:
+
+```console
+$ hatch --version
+Hatch, version 1.9.3
+```
+
+You can create a new virtual environment for Haystack with `hatch` by running:
+
+```console
+$ hatch shell
+```
+
+### Clone the git repository
+
+You won't be able to make changes directly to this repo, so the first step is to [create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo).
+Once your fork is ready, you can clone a local copy with:
+
+```console
+$ git clone https://github.com/YOUR-USERNAME/haystack
+```
+
+If everything worked, you should be able to do something like this (the output might be different):
+
+```console
+$ cd haystack
+$ hatch version
+2.3.0-rc0
+```
+
+Last, install the pre-commit hooks with:
+
+```bash
+pre-commit install
+```
+
+This utility will run some tasks right before all `git commit` operations. From now on, your `git commit` output for
+Haystack should look something like this:
+
+```
+> git commit -m "test"
+check python ast.........................................................Passed
+check json...........................................(no files to check)Skipped
+check for merge conflicts................................................Passed
+check that scripts with shebangs are executable..........................Passed
+check toml...........................................(no files to check)Skipped
+check yaml...........................................(no files to check)Skipped
+fix end of files.........................................................Passed
+mixed line ending........................................................Passed
+don't commit to branch...................................................Passed
+trim trailing whitespace.................................................Passed
+ruff.....................................................................Passed
+codespell................................................................Passed
+Lint GitHub Actions workflow files...................(no files to check)Skipped
+[massi/contrib d18a2577] test
+ 2 files changed, 178 insertions(+), 45 deletions(-)
+```
+
+### Run the tests locally
+
+Tests will automatically run in our CI for every commit you push to your PR on Github. In order to save precious CI time
+we encourage you to run the tests locally before pushing new commits to Github. From the root of the git repository,
+you can run all the unit tests like this:
+
+```sh
+hatch run test:unit
+```
+
+Hatch will create a dedicated virtual environment, sync the required dependencies and run all the unit tests from the
+project. If you want to run a subset of the tests or even one test in particular, `hatch` will accept all the
+options you would normally pass to `pytest`, for example:
+
+```sh
+# run one test method from a specific test class in a test file
+hatch run test:unit test/test_logging.py::TestSkipLoggingConfiguration::test_skip_logging_configuration
+```
+
+### Run code quality checks locally
+
+We also use tools to ensure consistent code style, quality, and static type checking. The quality of your code will be
+tested by the CI, but once again, running the checks locally will speed up the review cycle.
+
+
+To check your code type checking, run:
+```sh
+hatch run test:types
+```
+
+
+To check your code format run:
+```sh
+hatch run format-check
+```
+
+
+To format your code, you can run:
+```sh
+hatch run format
+```
+
+
+To check your code style according to linting rules run:
+```sh
+hatch run check
+hatch run test:lint
+```
+
+
+If the linters spot any error, you can fix it before checking in your code:
+```sh
+hatch run fix
+```
+
+
+## Requirements for Pull Requests
+
+To ease the review process, please follow the instructions in this paragraph when creating a Pull Request:
+
+- For the title, use the [conventional commit convention](https://www.conventionalcommits.org/en/v1.0.0/).
+- For the body, follow the existing [pull request template](https://github.com/deepset-ai/haystack/blob/main/.github/pull_request_template.md) to describe and document your changes.
+
+### Release notes
+
+Each PR must include a release notes file under the `releasenotes/notes` path created with `reno`, and a CI check will
+fail if that's not the case. Pull requests with changes limited to tests, code comments or docstrings, and changes to
+the CI/CD systems can be labeled with `ignore-for-release-notes` by a maintainer in order to bypass the CI check.
+
+For example, if your PR is bumping the `transformers` version in the `pyproject.toml` file, that's something that
+requires release notes. To create the corresponding file, from the root of the repo run:
+
+```
+$ hatch run release-note bump-transformers-to-4-31
+```
+
+A release notes file in YAML format will be created in the appropriate folder, appending a unique id to the name of the
+release note you provided (in this case, `bump-transformers-to-4-31`). To add the actual content of the release notes,
+you must edit the file that's just been created. In the file, you will find multiple sections along with an explanation
+of what they're for. You have to remove all the sections that don't fit your release notes, in this case for example
+you would fill in the `enhancements` section to describe the change:
+
+```yaml
+enhancements:
+ - |
+ Upgrade transformers to the latest version 4.31.0 so that Haystack can support the new LLama2 models.
+```
+
+You can now add the file to the same branch containing the code changes. Your release note will be part of your pull
+request and reviewed along with any code you changed.
+
+## CI (Continuous Integration)
+
+We use GitHub Actions for our Continuous Integration tasks. This means that as soon as you open a PR, GitHub will start
+executing some workflows on your changes, like automated tests, linting, formatting, api docs generation, etc.
+
+If all goes well, at the bottom of your PR page you should see something like this, where all checks are green.
+
+
+
+If you see some red checks (like the following), then something didn't work, and action is needed from your side.
+
+
+
+Click on the failing test and see if there are instructions at the end of the logs of the failed test.
+For example, in the case above, the CI will give you instructions on how to fix the issue.
+
+
+
+## Working from GitHub forks
+
+In order for maintainers to be able to help you, we usually ask contributors to give us push access to their fork.
+
+To do so, please verify that "Allow edits and access to secrets by maintainers" on the PR preview page is checked
+(you can check it later on the PR's sidebar once it's created).
+
+
+
+## Writing tests
+
+We formally define three scopes for tests in Haystack with different requirements and purposes:
+
+### Unit test
+- Tests a single logical concept
+- Execution time is a few milliseconds
+- Any external resource is mocked
+- Always returns the same result
+- Can run in any order
+- Runs at every commit in PRs, automated through `hatch run test:unit`
+- Can run locally with no additional setup
+- **Goal: being confident in merging code**
+
+### Integration test
+- Tests a single logical concept
+- Execution time is a few seconds
+- It uses external resources that must be available before execution
+- When using models, cannot use inference
+- Always returns the same result or an error
+- Can run in any order
+- Runs at every commit in PRs, automated through `hatch run test:integration`
+- Can run locally with some additional setup (e.g. Docker)
+- **Goal: being confident in merging code**
+
+### End to End (e2e) test
+- Tests a sequence of multiple logical concepts
+- Execution time has no limits (can be always on)
+- Can use inference
+- Evaluates the results of the execution or the status of the system
+- It uses external resources that must be available before execution
+- Can return different results
+- Can be dependent on the order
+- Can be wrapped into any process execution
+- Runs outside the development cycle (nightly or on demand)
+- Might not be possible to run locally due to system and hardware requirements
+- **Goal: being confident in releasing Haystack**
+
+## Contributor Licence Agreement (CLA)
+
+Significant contributions to Haystack require a Contributor License Agreement (CLA). If the contribution requires a CLA,
+we will get in contact with you. CLAs are quite common among company-backed open-source frameworks, and our CLA’s wording
+is similar to other popular projects, like [Rasa](https://cla-assistant.io/RasaHQ/rasa) or
+[Google's Tensorflow](https://cla.developers.google.com/clas/new?domain=DOMAIN_GOOGLE&kind=KIND_INDIVIDUAL)
+(retrieved 4th November 2021).
+
+The agreement's main purpose is to protect the continued open use of Haystack. At the same time, it also helps in
+protecting you as a contributor. Contributions under this agreement will ensure that your code will continue to be
+open to everyone in the future (“You hereby grant to Deepset **and anyone** [...]”) as well as remove liabilities on
+your end (“you provide your Contributions on an AS IS basis, without warranties or conditions of any kind [...]”). You
+can find the Contributor Licence Agreement [here](https://cla-assistant.io/deepset-ai/haystack).
+
+If you have further questions about the licensing, feel free to reach out to contributors@deepset.ai.
diff --git a/testbed/deepset-ai__haystack/LICENSE b/testbed/deepset-ai__haystack/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6fb66d8d5fa55912ee80f4fdab94cab99140c448
--- /dev/null
+++ b/testbed/deepset-ai__haystack/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 deepset GmbH
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/testbed/deepset-ai__haystack/README.md b/testbed/deepset-ai__haystack/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..10566904e26586187d9cfdb68dbf9fb8603cfd21
--- /dev/null
+++ b/testbed/deepset-ai__haystack/README.md
@@ -0,0 +1,121 @@
+
+
+[Haystack](https://haystack.deepset.ai/) is an end-to-end LLM framework that allows you to build applications powered by
+LLMs, Transformer models, vector search and more. Whether you want to perform retrieval-augmented generation (RAG),
+document search, question answering or answer generation, Haystack can orchestrate state-of-the-art embedding models
+and LLMs into pipelines to build end-to-end NLP applications and solve your use case.
+
+## Installation
+
+The simplest way to get Haystack is via pip:
+
+```sh
+pip install haystack-ai
+```
+
+Install from the `main` branch to try the newest features:
+```sh
+pip install git+https://github.com/deepset-ai/haystack.git@main
+```
+
+Haystack supports multiple installation methods including Docker images. For a comprehensive guide please refer
+to the [documentation](https://docs.haystack.deepset.ai/docs/installation).
+
+## Documentation
+
+If you're new to the project, check out ["What is Haystack?"](https://haystack.deepset.ai/overview/intro) then go
+through the ["Get Started Guide"](https://haystack.deepset.ai/overview/quick-start) and build your first LLM application
+in a matter of minutes. Keep learning with the [tutorials](https://haystack.deepset.ai/tutorials). For more advanced
+use cases, or just to get some inspiration, you can browse our Haystack recipes in the
+[Cookbook](https://haystack.deepset.ai/cookbook).
+
+At any given point, hit the [documentation](https://docs.haystack.deepset.ai/docs/intro) to learn more about Haystack, what it can do for you and the technology behind it.
+
+## Features
+
+> [!IMPORTANT]
+> **You are currently looking at the readme of Haystack 2.0**. We are still maintaining Haystack 1.x to give everyone
+> enough time to migrate to 2.0. [Switch to Haystack 1.x here](https://github.com/deepset-ai/haystack/tree/v1.x).
+
+- **Technology agnostic:** Allow users the flexibility to decide what vendor or technology they want and make it easy to switch out any component for another. Haystack allows you to use and compare models available from OpenAI, Cohere and Hugging Face, as well as your own local models or models hosted on Azure, Bedrock and SageMaker.
+- **Explicit:** Make it transparent how different moving parts can “talk” to each other so it's easier to fit your tech stack and use case.
+- **Flexible:** Haystack provides all tooling in one place: database access, file conversion, cleaning, splitting, training, eval, inference, and more. And whenever custom behavior is desirable, it's easy to create custom components.
+- **Extensible:** Provide a uniform and easy way for the community and third parties to build their own components and foster an open ecosystem around Haystack.
+
+Some examples of what you can do with Haystack:
+
+- Build **retrieval augmented generation (RAG)** by making use of one of the available vector databases and customizing your LLM interaction, the sky is the limit 🚀
+- Perform Question Answering **in natural language** to find granular answers in your documents.
+- Perform **semantic search** and retrieve documents according to meaning.
+- Build applications that can make complex decisions to answer complex queries: such as systems that can resolve complex customer queries, do knowledge search on many disconnected resources and so on.
+- Scale to millions of docs using retrievers and production-scale components.
+- Use **off-the-shelf models** or **fine-tune** them to your data.
+- Use **user feedback** to evaluate, benchmark, and continuously improve your models.
+
+> [!TIP]
+>
+>
+> Are you looking for a managed solution that benefits from Haystack? [deepset Cloud](https://www.deepset.ai/deepset-cloud?utm_campaign=developer-relations&utm_source=haystack&utm_medium=readme) is our fully managed, end-to-end platform to integrate LLMs with your data, which uses Haystack for the LLM pipelines architecture.
+
+## 🔜 Visual Pipeline Editor
+
+Use **deepset Studio** to visually create and export your Haystack pipeline architecture as a YAML or as Python code. Learn more about it in [our announcement post](https://haystack.deepset.ai/blog/announcing-studio).
+
+
+
+
+👉 [Join the waitlist](https://landing.deepset.ai/deepset-studio-waitlist?utm_campaign=2408%20-%20Campaign%20-%20Studio%20Launch&utm_source=github&utm_medium=referral)!
+
+## Telemetry
+
+Haystack collects **anonymous** usage statistics of pipeline components. We receive an event every time these components are initialized. This way, we know which components are most relevant to our community.
+
+Read more about telemetry in Haystack or how you can opt out in [Haystack docs](https://docs.haystack.deepset.ai/docs/telemetry).
+
+## 🖖 Community
+
+If you have a feature request or a bug report, feel free to open an [issue in GitHub](https://github.com/deepset-ai/haystack/issues). We regularly check these and you can expect a quick response. If you'd like to discuss a topic, or get more general advice on how to make Haystack work for your project, you can start a thread in [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) or our [Discord channel](https://discord.com/invite/VBpFzsgRVF). We also check [𝕏 (Twitter)](https://twitter.com/haystack_ai) and [Stack Overflow](https://stackoverflow.com/questions/tagged/haystack).
+
+## Contributing to Haystack
+
+We are very open to the community's contributions - be it a quick fix of a typo, or a completely new feature! You don't need to be a Haystack expert to provide meaningful improvements. To learn how to get started, check out our [Contributor Guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) first.
+
+There are several ways you can contribute to Haystack:
+- Contribute to the main Haystack project
+- Contribute an integration on [haystack-core-integrations](https://github.com/deepset-ai/haystack-core-integrations)
+
+> [!TIP]
+>👉 **[Check out the full list of issues that are open to contributions](https://github.com/orgs/deepset-ai/projects/14)**
+
+## Who Uses Haystack
+
+Here's a list of projects and companies using Haystack. Want to add yours? Open a PR, add it to the list and let the
+world know that you use Haystack!
+
+- [Airbus](https://www.airbus.com/en)
+- [Alcatel-Lucent](https://www.al-enterprise.com/)
+- [Apple](https://www.apple.com/)
+- [BetterUp](https://www.betterup.com/)
+- [Databricks](https://www.databricks.com/)
+- [Deepset](https://deepset.ai/)
+- [Etalab](https://www.deepset.ai/blog/improving-on-site-search-for-government-agencies-etalab)
+- [Infineon](https://www.infineon.com/)
+- [Intel](https://github.com/intel/open-domain-question-and-answer#readme)
+- [Intelijus](https://www.intelijus.ai/)
+- [Intel Labs](https://github.com/IntelLabs/fastRAG#readme)
+- [LEGO](https://github.com/larsbaunwall/bricky#readme)
+- [Netflix](https://netflix.com)
+- [NOS Portugal](https://www.nos.pt/en/welcome)
+- [Nvidia](https://developer.nvidia.com/blog/reducing-development-time-for-intelligent-virtual-assistants-in-contact-centers/)
+- [PostHog](https://github.com/PostHog/max-ai#readme)
+- [Rakuten](https://www.rakuten.com/)
+- [Sooth.ai](https://www.deepset.ai/blog/advanced-neural-search-with-sooth-ai)
diff --git a/testbed/deepset-ai__haystack/SECURITY.md b/testbed/deepset-ai__haystack/SECURITY.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d7370e79deec354cb3702e05803c545af7bb2ab
--- /dev/null
+++ b/testbed/deepset-ai__haystack/SECURITY.md
@@ -0,0 +1,26 @@
+# Security Policy
+
+## Report a Vulnerability
+
+If you found a security vulnerability in Haystack, send a message to
+[security@deepset.ai](mailto:security@deepset.ai).
+
+In your message, please include:
+
+1. Reproducible steps to trigger the vulnerability.
+2. An explanation of what makes you think there is a vulnerability.
+3. Any information you may have on active exploitations of the vulnerability (zero-day).
+
+## Vulnerability Response
+
+We'll review your report within 5 business days and we will do a preliminary analysis
+to confirm that the vulnerability is plausible. Otherwise, we'll decline the report.
+
+We won't disclose any information you share with us but we'll use it to get the issue
+fixed or to coordinate a vendor response, as needed.
+
+We'll keep you updated on the status of the issue.
+
+Our goal is to disclose bugs as soon as possible once a user mitigation is available.
+Once we get a good understanding of the vulnerability, we'll set a disclosure date after
+consulting the author of the report and Haystack maintainers.
diff --git a/testbed/deepset-ai__haystack/VERSION.txt b/testbed/deepset-ai__haystack/VERSION.txt
new file mode 100644
index 0000000000000000000000000000000000000000..32a386165cc8fa3aed468c3bd0b4e9ca377ea78b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/VERSION.txt
@@ -0,0 +1 @@
+2.8.0-rc0
diff --git a/testbed/deepset-ai__haystack/code_of_conduct.txt b/testbed/deepset-ai__haystack/code_of_conduct.txt
new file mode 100644
index 0000000000000000000000000000000000000000..61135cb385878d854077c13b51349d322057ecf3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/code_of_conduct.txt
@@ -0,0 +1,98 @@
+CONTRIBUTOR COVENANT CODE OF CONDUCT
+====================================
+
+Our Pledge
+----------
+
+We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for
+everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics,
+gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance,
+race, caste, color, religion, or sexual identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
+
+Our Standards
+-------------
+
+Examples of behavior that contributes to a positive environment for our community include:
+ - Demonstrating empathy and kindness toward other people
+ - Being respectful of differing opinions, viewpoints, and experiences
+ - Giving and gracefully accepting constructive feedback
+ - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
+ - Focusing on what is best not just for us as individuals, but for the overall community
+
+Examples of unacceptable behavior include:
+ - The use of sexualized language or imagery, and sexual attention or advances of any kind
+ - Trolling, insulting or derogatory comments, and personal or political attacks
+ - Public or private harassment
+ - Publishing others’ private information, such as a physical or email address, without their explicit permission
+ - Other conduct which could reasonably be considered inappropriate in a professional setting
+
+Enforcement Responsibilities
+----------------------------
+
+Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take
+appropriate and fair corrective action in response to any behavior that they deem inappropriate,
+threatening, offensive, or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits,
+issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for
+moderation decisions when appropriate.
+
+Scope
+-----
+
+This Code of Conduct applies within all community spaces, and also applies when an individual is officially
+representing the community in public spaces. Examples of representing our community include using an official
+e-mail address, posting via an official social media account, or acting as an appointed representative
+at an online or offline event.
+
+Enforcement
+-----------
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible
+for enforcement at engage@deepset.ai. All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the reporter of any incident.
+
+Enforcement Guidelines
+----------------------
+
+Community leaders will follow these Community Impact Guidelines in determining the consequences for any action
+they deem in violation of this Code of Conduct:
+
+1. Correction
+ Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
+
+ Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation
+ and an explanation of why the behavior was inappropriate. A public apology may be requested.
+
+2. Warning
+ Community Impact: A violation through a single incident or series of actions.
+
+ Consequence: A warning with consequences for continued behavior. No interaction with the people involved,
+ including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time.
+ This includes avoiding interactions in community spaces as well as external channels like social media.
+ Violating these terms may lead to a temporary or permanent ban.
+
+3. Temporary Ban
+ Community Impact: A serious violation of community standards, including sustained inappropriate behavior.
+
+ Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified
+ period of time. No public or private interaction with the people involved, including unsolicited interaction with
+ those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
+
+4. Permanent Ban
+ Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
+
+ Consequence: A permanent ban from any sort of public interaction within the community.
+
+Attribution
+-----------
+
+This Code of Conduct is adapted from the Contributor Covenant, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by Mozilla’s code of conduct enforcement ladder.
+
+For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq.
+Translations are available at https://www.contributor-covenant.org/translations.
diff --git a/testbed/deepset-ai__haystack/docs/README.md b/testbed/deepset-ai__haystack/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e5c1e877ee3d2e1405dd1a4100344858f2aa94f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/README.md
@@ -0,0 +1,57 @@
+# :ledger: Looking for the docs?
+
+You can find Haystack's documentation at https://docs.haystack.deepset.ai/.
+
+# :computer: How to update docs?
+
+## Overview, Components, Pipeline Nodes, and Guides
+
+You can find these docs on the Haystack Docs page: https://docs.haystack.deepset.ai/docs/get_started. If you want to contribute, and we welcome every contribution, do the following:
+1. Make sure you're on the right version (check the version expanding list in the top left corner).
+2. Use the "Suggest Edits" link you can find in the top right corner of every page.
+3. Suggest a change right in the docs and click **Submit Suggested Edits**.
+4. Optionally, leave us a comment and submit your change.
+
+Once we take care of it, you'll get an email telling you the change's been merged, or not. If not, we'll give you the reason why.
+
+Make sure to check our [Contribution Guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md).
+
+## Tutorials
+
+The Tutorials live in a separate repo: https://github.com/deepset-ai/haystack-tutorials. For instructions on how to contribute to tutorials, see [Contributing to Tutorials](https://github.com/deepset-ai/haystack-tutorials/blob/main/Contributing.md#contributing-to-haystack-tutorials).
+
+## API Reference
+
+We use Pydoc-Markdown to create Markdown files from the docstrings in our code. There is a GitHub Action that regenerates the API pages with each commit.
+
+If you want to generate a new Markdown file for a new Haystack module, create a `.yml` file in `docs/pydoc/config` which configures how Pydoc-Markdown will generate the page and commit it to main.
+
+All the updates to docstrings get pushed to documentation when you commit to the main branch.
+
+### Configuration
+
+Pydoc will read the configuration from a `.yml` file which is located under `docs/pydoc/config`. Our files contain three main sections:
+
+- **loader**: A list of plugins that load API objects from python source files.
+ - **type**: Loader for python source files
+ - **search_path**: Location of source files
+ - **modules**: Modules which are used for generating the Markdown file
+ - **ignore_when_discovered**: Define which files should be ignored
+- **processor**: A list of plugins that process API objects to modify their docstrings (e.g. to adapt them from a documentation format to Markdown or to remove items that should not be rendered into the documentation).
+ - **type: filter**: Filter for specific modules
+ - **documented_only**: Only documented API objects
+ - **do_not_filter_modules**: Do not filter module objects
+ - **skip_empty_modules**: Skip modules without content
+- **renderer**: A plugin that produces the output files. We use a custom ReadmeRenderer based on the Markdown renderer. It makes sure the Markdown files comply with ReadMe requirements.
+ - **type**: Define the renderer which you want to use. We are using the ReadmeRenderer to make sure the files display properly in ReadMe.
+ - **excerpt**: Add a short description of the page. It shows up right below the page title.
+ - **category**: This is the ReadMe category ID to make sure the doc lands in the right section of Haystack docs.
+ - **title**: The title of the doc as it will appear on the website. Make sure you always add "API" at the end.
+ - **slug**: The page slug, each word should be separated with a dash.
+ - **order**: Pages are ordered alphabetically. This defines where in the TOC the page lands.
+ - markdown:
+ - **descriptive_class_title**: Remove the word "Object" from class titles.
+ - **descriptive_module_title**: Add the word “Module” before the module name.
+ - **add_method_class_prefix**: Add the class name as a prefix to method names.
+ - **add_member_class_prefix**: Add the class name as a prefix to member names.
+ - **filename**: File name of the generated file, use underscores to separate each word.
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/audio_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/audio_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8c6a6708866db2c6e5b49b102a5c4d17a70f744e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/audio_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/audio]
+ modules: ["whisper_local", "whisper_remote"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Transcribes audio files.
+ category_slug: haystack-api
+ title: Audio
+ slug: audio-api
+ order: 3
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: audio_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/builders_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/builders_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d16085b0c42107eeda21aa03a0385730a34b29d8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/builders_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/builders]
+ modules: ["answer_builder", "prompt_builder", "chat_prompt_builder"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Extract the output of a Generator to an Answer format, and build prompts.
+ category_slug: haystack-api
+ title: Builders
+ slug: builders-api
+ order: 5
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: builders_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/caching_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/caching_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a38a5ba9381ff40cb6d382b7ae1343da66d2e661
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/caching_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/caching]
+ modules: ["cache_checker"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Checks if any document coming from the given URL is already present in the store.
+ category_slug: haystack-api
+ title: Caching
+ slug: caching-api
+ order: 7
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: cachings_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/classifiers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/classifiers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..14a97ee7bd38a33297961a29e06cce2c954539d2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/classifiers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/classifiers]
+ modules: ["document_language_classifier", "zero_shot_document_classifier"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Classify documents based on the provided labels.
+ category_slug: haystack-api
+ title: Classifiers
+ slug: classifiers-api
+ order: 10
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: classifiers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/connectors.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/connectors.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b53b4bb80f9b138ac4d3468779b54c4f3a3ac421
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/connectors.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/connectors]
+ modules: ["openapi_service"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Various connectors to integrate with external services.
+ category_slug: haystack-api
+ title: Connectors
+ slug: connectors-api
+ order: 15
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: connectors_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/converters_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/converters_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6c89138c98fd2c93cb72c4bf20ef3117a9f00657
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/converters_api.yml
@@ -0,0 +1,42 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/converters]
+ modules:
+ [
+ "azure",
+ "csv",
+ "docx",
+ "html",
+ "json",
+ "markdown",
+ "openapi_functions",
+ "output_adapter",
+ "pdfminer",
+ "pptx",
+ "pypdf",
+ "tika",
+ "txt",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Various converters to transform data from one format to another.
+ category_slug: haystack-api
+ title: Converters
+ slug: converters-api
+ order: 20
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: converters_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/data_classess_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/data_classess_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a67f28db9d4412faa315d6964a074ab261904f1c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/data_classess_api.yml
@@ -0,0 +1,28 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/dataclasses]
+ modules:
+ ["answer", "byte_stream", "chat_message", "document", "streaming_chunk", "sparse_embedding"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Core classes that carry data through the system.
+ category_slug: haystack-api
+ title: Data Classes
+ slug: data-classes-api
+ order: 30
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: data_classess_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/document_stores_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/document_stores_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f9744e197723ef7f9ed42f31543cec17c774cfad
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/document_stores_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/document_stores/in_memory]
+ modules: ["document_store"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Stores your texts and meta data and provides them to the Retriever at query time.
+ category_slug: haystack-api
+ title: Document Stores
+ slug: document-stores-api
+ order: 40
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: document_stores_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/document_writers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/document_writers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1d16ae84a4df1c55dd8604faedd3cb9e565d8c9e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/document_writers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/writers]
+ modules: ["document_writer"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Writes Documents to a DocumentStore.
+ category_slug: haystack-api
+ title: Document Writers
+ slug: document-writers-api
+ order: 50
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: document_writers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/embedders_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/embedders_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9528039aa0c236fc2833850d17f6856632732489
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/embedders_api.yml
@@ -0,0 +1,37 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/embedders]
+ modules:
+ [
+ "azure_document_embedder",
+ "azure_text_embedder",
+ "hugging_face_api_document_embedder",
+ "hugging_face_api_text_embedder",
+ "openai_document_embedder",
+ "openai_text_embedder",
+ "sentence_transformers_document_embedder",
+ "sentence_transformers_text_embedder",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Transforms queries into vectors to look for similar or relevant Documents.
+ category_slug: haystack-api
+ title: Embedders
+ slug: embedders-api
+ order: 60
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: embedders_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/evaluation_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/evaluation_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e445e9a56802c593ce22f80a0275a36a15ace76b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/evaluation_api.yml
@@ -0,0 +1,31 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/evaluation]
+ modules:
+ [
+ "base",
+ "eval_run_result",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Represents the results of evaluation.
+ category_slug: haystack-api
+ title: Evaluation
+ slug: evaluation-api
+ order: 61
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: evaluation_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/evaluators_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/evaluators_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9a8460b94ad6ab272bd0074a7afecb19eb21f269
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/evaluators_api.yml
@@ -0,0 +1,38 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/evaluators]
+ modules:
+ [
+ "answer_exact_match",
+ "context_relevance",
+ "document_map",
+ "document_mrr",
+ "document_ndcg",
+ "document_recall",
+ "faithfulness",
+ "llm_evaluator",
+ "sas_evaluator",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Evaluate your pipelines or individual components.
+ category_slug: haystack-api
+ title: Evaluators
+ slug: evaluators-api
+ order: 63
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: evaluators_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/extractors_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/extractors_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..76ee7f0b15a77f3322eadb816fc04c91abfc059b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/extractors_api.yml
@@ -0,0 +1,29 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/extractors]
+ modules: ["named_entity_extractor"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: filter
+ expression: "name not in ['_BackendEnumMeta', '_NerBackend', '_HfBackend', '_SpacyBackend']"
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Extracts predefined entities out of a piece of text.
+ category_slug: haystack-api
+ title: Extractors
+ slug: extractors-api
+ order: 65
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: extractors_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/fetchers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/fetchers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9e021d3ef4527f3e502dfd90b558061628a7305c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/fetchers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/fetchers]
+ modules: ["link_content"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Fetches content from a list of URLs and returns a list of extracted content streams.
+ category_slug: haystack-api
+ title: Fetchers
+ slug: fetchers-api
+ order: 80
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: fetchers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/generators_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/generators_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4fcfa74879922375ef2a7a6d7d4f3d051546e0fc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/generators_api.yml
@@ -0,0 +1,37 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/generators]
+ modules:
+ [
+ "azure",
+ "hugging_face_local",
+ "hugging_face_api",
+ "openai",
+ "chat/azure",
+ "chat/hugging_face_local",
+ "chat/hugging_face_api",
+ "chat/openai",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Enables text generation using LLMs.
+ category_slug: haystack-api
+ title: Generators
+ slug: generators-api
+ order: 70
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: generators_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/joiners_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/joiners_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..72708f4400d401791c1681c6dcf6c30bef05dc47
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/joiners_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/joiners]
+ modules: ["document_joiner", "branch", "answer_joiner", "string_joiner"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+  excerpt: Components that join lists of different objects.
+ category_slug: haystack-api
+ title: Joiners
+ slug: joiners-api
+ order: 75
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: joiners_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/pipeline_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/pipeline_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d720152510ae3b06f367fbc593991882084cb744
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/pipeline_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/core/pipeline]
+ modules: ["pipeline"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Arranges components and integrations in flow.
+ category_slug: haystack-api
+ title: Pipeline
+ slug: pipeline-api
+ order: 90
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: pipeline_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/preprocessors_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/preprocessors_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c27e01be340f3cfb1eeb618a86c45278ba2e1690
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/preprocessors_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/preprocessors]
+ modules: ["document_cleaner", "document_splitter", "text_cleaner", "nltk_document_splitter"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Preprocess your Documents and texts. Clean, split, and more.
+ category_slug: haystack-api
+ title: PreProcessors
+ slug: preprocessors-api
+ order: 100
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: preprocessors_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/rankers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/rankers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..31dc468ad863357e39822e3f8f9f74646b04022f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/rankers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/rankers]
+ modules: ["lost_in_the_middle", "meta_field", "transformers_similarity", "sentence_transformers_diversity"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Reorders a set of Documents based on their relevance to the query.
+ category_slug: haystack-api
+ title: Rankers
+ slug: rankers-api
+ order: 110
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: rankers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/readers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/readers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ef0974b78ae19473c812d124497c9a9c94678e64
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/readers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/readers]
+ modules: ["extractive"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Takes a query and a set of Documents as input and returns ExtractedAnswers by selecting a text span within the Documents.
+ category_slug: haystack-api
+ title: Readers
+ slug: readers-api
+ order: 120
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: readers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/retrievers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/retrievers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f0e8ebd1ca70a09890629311c4b6adb099ff16
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/retrievers_api.yml
@@ -0,0 +1,33 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/retrievers]
+ modules:
+ [
+ "in_memory/bm25_retriever",
+ "in_memory/embedding_retriever",
+ "filter_retriever",
+ "sentence_window_retriever",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Sweeps through a Document Store and returns a set of candidate Documents that are relevant to the query.
+ category_slug: haystack-api
+ title: Retrievers
+ slug: retrievers-api
+ order: 130
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: retrievers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/routers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/routers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..126d08d7ea1fdf5f16b51bb1849b54f1959c43ef
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/routers_api.yml
@@ -0,0 +1,35 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/routers]
+ modules:
+ [
+ "conditional_router",
+ "file_type_router",
+ "metadata_router",
+ "text_language_router",
+ "transformers_text_router",
+ "zero_shot_text_router",
+ ]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Routers is a group of components that route queries or Documents to other components that can handle them best.
+ category_slug: haystack-api
+ title: Routers
+ slug: routers-api
+ order: 140
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: routers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/samplers_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/samplers_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f0a5bdbca98ff96fa7f3418456ff5ec87a19b1a2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/samplers_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/samplers]
+ modules: ["top_p"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Filters documents based on their similarity scores using top-p sampling.
+ category_slug: haystack-api
+ title: Samplers
+ slug: samplers-api
+ order: 150
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: samplers_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/utils_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/utils_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3b32a0276d5a71cf6b05cb238802ae2f4f67ecbd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/utils_api.yml
@@ -0,0 +1,28 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/utils]
+ ignore_when_discovered: ["__init__", "hf"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: filter
+ expression: "name not in ['TokenSecret', 'EnvVarSecret','_get_default_device', '_split_device_string', 'convert']"
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Utility functions and classes used across the library.
+ category_slug: haystack-api
+ title: Utils
+ slug: utils-api
+ order: 153
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: utils_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/validators_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/validators_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..48b452eeeb5a4d4fec1f4a36ee47fd36f8040d1e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/validators_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/validators]
+ modules: ["json_schema"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+  excerpt: Validators validate LLM outputs.
+ category_slug: haystack-api
+ title: Validators
+ slug: validators-api
+ order: 155
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: validators_api.md
diff --git a/testbed/deepset-ai__haystack/docs/pydoc/config/websearch_api.yml b/testbed/deepset-ai__haystack/docs/pydoc/config/websearch_api.yml
new file mode 100644
index 0000000000000000000000000000000000000000..daa08fd5f1bcbb28c823db2992668d5d439d755c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/docs/pydoc/config/websearch_api.yml
@@ -0,0 +1,27 @@
+loaders:
+ - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+ search_path: [../../../haystack/components/websearch]
+ modules: ["serper_dev", "searchapi"]
+ ignore_when_discovered: ["__init__"]
+processors:
+ - type: filter
+ expression:
+ documented_only: true
+ do_not_filter_modules: false
+ skip_empty_modules: true
+ - type: smart
+ - type: crossref
+renderer:
+ type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
+ excerpt: Web search engine for Haystack.
+ category_slug: haystack-api
+ title: Websearch
+ slug: websearch-api
+ order: 170
+ markdown:
+ descriptive_class_title: false
+ classdef_code_block: false
+ descriptive_module_title: true
+ add_method_class_prefix: true
+ add_member_class_prefix: false
+ filename: websearch_api.md
diff --git a/testbed/deepset-ai__haystack/examples/README.md b/testbed/deepset-ai__haystack/examples/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..09feb7f18b84b1c6abd9ada958e8631e49c17109
--- /dev/null
+++ b/testbed/deepset-ai__haystack/examples/README.md
@@ -0,0 +1,5 @@
+# Examples have been moved!
+
+If you're searching for Haystack examples we moved them into a dedicated repository.
+
+You can find all the example cookbooks [👉 here 👈](https://github.com/deepset-ai/haystack-cookbook/).
diff --git a/testbed/deepset-ai__haystack/license-header.txt b/testbed/deepset-ai__haystack/license-header.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ec46732299642d1651e93564ccd09cacb8d264eb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/license-header.txt
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: ${inceptionYear}-present ${copyrightOwner}
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/testbed/deepset-ai__haystack/licenserc.toml b/testbed/deepset-ai__haystack/licenserc.toml
new file mode 100644
index 0000000000000000000000000000000000000000..eb0e810056d1b88af2a0c0cf72fc41dc02cadb2e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/licenserc.toml
@@ -0,0 +1,23 @@
+headerPath = "license-header.txt"
+
+excludes = [
+ ".github",
+ "docker",
+ "docs",
+ "examples",
+ "proposals",
+ "releasenotes",
+ "test",
+ "CITATION.cff",
+ "*.ini",
+ "*.jinja2",
+ "*.md",
+ "*.toml",
+ "*.txt",
+ "*.yaml",
+ "*.wav",
+]
+
+[properties]
+inceptionYear = 2022
+copyrightOwner = "deepset GmbH "
diff --git a/testbed/deepset-ai__haystack/proposals/0000-template.md b/testbed/deepset-ai__haystack/proposals/0000-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6b5ac209b717233bbdefc5e16bb59f68bdc8fe4
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/0000-template.md
@@ -0,0 +1,64 @@
+- Title: (the title in plain english)
+- Decision driver: (your name here)
+- Start Date: (today's date, in format YYYY-MM-DD)
+- Proposal PR: (fill in after opening the PR)
+- Github Issue or Discussion: (only if available, link the original request for this change)
+
+# Summary
+
+Explain the feature briefly.
+
+# Basic example
+
+When applicable, write a snippet of code showing how the new feature would
+be used.
+
+# Motivation
+
+Give us more background and explanation: Why do we need this feature? What use cases does it support? What's the expected
+outcome?
+
+Focus on explaining the motivation for this feature. We'd like to understand it, so that even if we don't accept this
+proposal, others can use the motivation to develop alternative solutions.
+
+# Detailed design
+
+This is the bulk of the proposal. Explain the design in enough detail for somebody
+familiar with Haystack to understand, and for somebody familiar with the
+implementation to implement. Get into specifics and corner-cases,
+and include examples of how the feature is used. Also, if there's any new terminology involved,
+define it here.
+
+# Drawbacks
+
+Look at the feature from the other side: what are the reasons why we should _not_ work on it? Consider the following:
+
+- What's the implementation cost, both in terms of code size and complexity?
+- Can the solution you're proposing be implemented as a separate package, outside of Haystack?
+- Does it teach people more about Haystack?
+- How does this feature integrate with other existing and planned features?
+- What's the cost of migrating existing Haystack pipelines (is it a breaking change?)?
+
+There are tradeoffs to choosing any path. Attempt to identify them here.
+
+# Alternatives
+
+What other designs have you considered? What's the impact of not adding this feature?
+
+# Adoption strategy
+
+If we implement this proposal, how will the existing Haystack users adopt it? Is
+this a breaking change? Can we write a migration script?
+
+# How we teach this
+
+Would implementing this feature mean the documentation must be re-organized
+or updated? Does it change how Haystack is taught to new developers at any level?
+
+How should this feature be taught to the existing Haystack users (for example with a page in the docs,
+a tutorial, ...).
+
+# Unresolved questions
+
+Optional, but suggested for first drafts. What parts of the design are still
+TBD?
diff --git a/testbed/deepset-ai__haystack/proposals/README.md b/testbed/deepset-ai__haystack/proposals/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ddf37edeb51cfd25650cba13f5672d5eefe594f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/README.md
@@ -0,0 +1,120 @@
+# Haystack proposals design process
+
+Most of the changes to Haystack, including bug fixes and small improvements,
+are implemented through the normal Pull Request workflow, according to our
+[contribution guidelines](../CONTRIBUTING.md).
+
+Some changes, though, are "substantial", and these are the ones we want to put through a bit
+of a design process to make sure we're all on the same page before we invest the time
+into the actual implementation of a new feature or a deep refactoring.
+
+We've introduced the "Proposals design process" to provide a
+consistent and controlled path for such changes to Haystack.
+
+We will apply the same level of rigor to both core developers' and
+Community's proposals. The primary difference between them is in the design phase:
+core developers proposals tend to be submitted at the end of the design process
+whereas the Community ones tend to be submitted at the beginning, as a way
+to kickstart it.
+
+## When do I follow the process?
+
+Follow the process if you intend to make "substantial" changes to Haystack, `rest_api` or the process itself. What is
+defined as a "substantial" change is evolving based on community norms and on what part of the project you are proposing
+to change, but it may include the following:
+
+- A new feature that creates new API surface areas.
+- A new component (Nodes, Pipelines, Document Stores).
+- Removing features that already shipped in the current minor version.
+- A deep refactoring that would require new tests or introduce new dependencies.
+- A change that's complex enough to require multiple steps to be delivered.
+
+Some changes don't require a proposal, for example:
+
+- Minor bug fixes.
+- Rephrasing, reorganizing, or otherwise "changing shape does not change meaning".
+- Addition and removal of warnings or other error messages.
+- Additions only likely to be noticed by other contributors, invisible to Haystack users.
+
+In any case, the core developers might politely ask you to submit a proposal before merging
+a new feature when they see fit.
+
+## Before creating a proposal
+
+Laying some groundwork ahead of the proposal can make the process smoother.
+
+Although there is no single way to prepare for submitting a proposal, it is generally a good idea
+to collect feedback from other project developers first, to make sure that the change
+is actually needed. As we're an open source community where everyone can impact the project, we all
+need to make an effort to build consensus.
+
+When you're preparing for writing and submitting a proposal, talk the idea over on our official
+[Discord server](https://haystack.deepset.ai/community/join) and in a Github
+issue or discussion in the [Haystack repository](https://github.com/deepset-ai/haystack).
+
+## The process
+
+To get a major feature added to Haystack, you first merge the proposal into the Haystack repo as a Markdown file.
+At that point, the proposal can be implemented and eventually included into the codebase.
+
+There are several people involved in the process:
+- **Decision Driver**: the person creating the proposal. If the Decision Driver is not a core contributor themselves,
+ one will be assigned to the PR and will take care of facilitating the process.
+- **Input Givers**: anybody reviewing or commenting the PR.
+- **Approvers**: the core contributors approving the PR.
+
+During its lifecycle, a proposal can transition between the following states:
+- **Review**: proposal is getting feedback.
+- **Final Comment**: proposal received approval from 3 core contributors; this state must be kept for a grace period of
+ 3 calendar days.
+- **Active**: proposal was approved and merged and can be implemented if not already.
+- **Stale**: proposal didn't get any update in the last 30 days and will be closed after a grace period of 10 days.
+- **Rejected**: proposal was actively rejected and the reasons explained.
+
+
+To create a proposal:
+
+1. Copy `0000-template.md` to `text/0000-my-feature.md`, where 'my-feature' is a descriptive name of the feature you're
+ proposing. Don't assign an identification number yet.
+2. Fill in the proposal. Pay attention to details. Proposals that present convincing motivation,
+ demonstrate an understanding of the feature impact, and honestly present the drawbacks and
+ alternatives tend to be received well.
+3. Submit a pull request. This ensures the document receives design feedback from a larger community,
+ and as the Decision Driver, you should be prepared to revise it in response.
+4. Rename the file using the PR number, for example from `text/0000-my-feature.md` to `text/4242-my-feature.md`. The
+ proposal is now in **Review** state.
+5. Reach an agreement with the Input Givers and integrate the feedback you got. Proposals that have broad support are
+ much more likely to make progress than those that don't receive any comments.
+6. Now it's time for the Approvers to decide whether the proposal is a candidate for inclusion in Haystack. Note that a
+ review from the core contributors may take a long time, and getting early feedback from members of the Community can
+ ease the process.
+7. When the proposal enters the **Final Comment** state (see above), the PR will be marked accordingly, entering a
+ grace period lasting 3 calendar days during which a proposal can be modified based on feedback from core developers
+ or the Community. Big changes may trigger a new final comment period.
+8. Approvers may reject a proposal once the public discussion and adding comments are over, adding the reason for
+ rejection. A core developer then closes the related PR. The proposal gets the **Rejected** state.
+9. When the final comment period ends, the PR is merged and the proposal becomes **Active**.
+
+## What happens next
+
+Once a proposal becomes active, the authors are free to implement it and submit the feature as one or more pull
+requests to the Haystack repo. Becoming 'active' is not a rubber stamp, and in particular still doesn't
+mean the feature will ultimately be merged; it does mean that the core team has agreed to it in
+principle and is open to merging it if the implementation reflects the contents of the proposal.
+
+The fact that a given proposal has been accepted and is 'active' doesn't imply it has a priority assigned or somebody's
+currently working on it.
+
+To change an active proposal, open follow-up PRs. Our goal is to write each proposal so that
+it reflects the final design of the feature, but the nature of the process means that we cannot
+expect every merged proposal to actually reflect what the end result will be at the time of the next release.
+That's why we try to keep each proposal document somewhat in sync with the feature as planned, tracking such
+changes through follow-up pull requests to the document.
+
+As the author of a proposal, you're not obligated to implement it. Of course, the author (like any other developer)
+is welcome to post an implementation for review after the proposal has been accepted.
+
+## Inspiration
+
+Haystack's proposals design process owes its inspiration to the [React](https://github.com/reactjs/rfcs) and
+[Rust](https://github.com/rust-lang/rfcs) RFC processes. We're open to changing it if needed.
diff --git a/testbed/deepset-ai__haystack/proposals/text/2170-pydantic-dataclasses.md b/testbed/deepset-ai__haystack/proposals/text/2170-pydantic-dataclasses.md
new file mode 100644
index 0000000000000000000000000000000000000000..a30f94d8279c88b5c7f11dfc1b997ae08f79c623
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/2170-pydantic-dataclasses.md
@@ -0,0 +1,80 @@
+
+- Start Date: 2021-10-14
+- Proposal PR: n/a
+- Github Issue: https://github.com/deepset-ai/haystack/pull/1598
+- Deciders: @tholor
+
+
+## Context and Problem Statement
+
+Originally we implemented Haystack's primitive based on Python's vanilla `dataclasses`. However, shortly after we realized this causes issues with FastAPI, which uses Pydantic's implementation. We need to decide which version (vanilla Python's or Pydantic's) to use in our codebase.
+
+## Decision Drivers
+
+* The Swagger autogenerated documentation for REST API in FastAPI was broken where the dataclasses include non-standard fields (`pd.dataframe` + `np.ndarray`)
+
+## Considered Options
+
+* Switch to Pydantic `dataclasses` in our codebase as well.
+* Staying with vanilla `dataclasses` and find a workaround for FastAPI to accept them in place of Pydantic's implementation.
+
+## Decision Outcome
+
+Chosen option: **1**, because our initial concerns about speed proved negligible and Pydantic's implementation provided some additional functionality for free (see below).
+
+### Positive Consequences
+
+* We can now inherit directly from the primitives in the REST API dataclasses, and overwrite the problematic fields with standard types.
+* We now get runtime type checks "for free", as this is a core feature of Pydantic's implementation.
+
+### Negative Consequences
+
+* Pydantic dataclasses are slower. See https://github.com/deepset-ai/haystack/pull/1598 for a rough performance assessment.
+* Pydantic dataclasses do not play nice with mypy and autocomplete tools unaided. In many cases a complex import statement, such as the following, is needed:
+
+```python
+if typing.TYPE_CHECKING:
+ from dataclasses import dataclass
+else:
+ from pydantic.dataclasses import dataclass
+```
+
+## Pros and Cons of the Options
+
+### Switch to Pydantic `dataclasses`
+
+* Good, because it solves the issue without having to find workarounds for FastAPI.
+* Good, because it adds type checks at runtime.
+* Bad, because mypy and autocomplete tools need assistance to parse its dataclasses properly. Example:
+
+```python
+if typing.TYPE_CHECKING:
+ from dataclasses import dataclass
+else:
+ from pydantic.dataclasses import dataclass
+```
+
+* Bad, because it introduces an additional dependency to Haystack (negligible)
+* Bad, because it adds some overhead on the creation of primitives (negligible)
+
+### Staying with vanilla `dataclasses`
+
+* Good, because it's Python's standard way to generate data classes
+* Good, because mypy can deal with them without plugins or other tricks.
+* Good, because it's faster than Pydantic's implementation.
+* Bad, because does not play well with FastAPI and Swagger (critical).
+* Bad, because it has no validation at runtime (negligible)
+
+## Links
+
+* https://pydantic-docs.helpmanual.io/usage/dataclasses/
+* https://github.com/deepset-ai/haystack/pull/1598
+* https://github.com/deepset-ai/haystack/issues/1593
+* https://github.com/deepset-ai/haystack/issues/1582
+* https://github.com/deepset-ai/haystack/pull/1398
+* https://github.com/deepset-ai/haystack/issues/1232
+
+
diff --git a/testbed/deepset-ai__haystack/proposals/text/3550-csv-converter.md b/testbed/deepset-ai__haystack/proposals/text/3550-csv-converter.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa905dd2162ff974c9a394b3dbcc2bd78ae9c53a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3550-csv-converter.md
@@ -0,0 +1,86 @@
+- Start Date: 2022-11-28
+- Proposal PR: [#3638](https://github.com/deepset-ai/haystack/issues/3638)
+- Github Issue: [#3550](https://github.com/deepset-ai/haystack/issues/3550)
+
+# Summary
+
+Using Haystack for question answering pipelines is pretty easy, but most of the time users have CSV files containing
+their knowledge base with questions and their associated answers.
+Unfortunately there is no easy way to dynamically update the knowledge base or import new data from CSV through the REST API
+using YAML, as there is no CSV parser.
+
+Having a basic way to dynamically index a CSV file always requires development of a new node.
+
+# Basic example
+
+To define an FAQ query and **indexing** pipeline we would then simply do:
+```yaml
+# To allow your IDE to autocomplete and validate your YAML pipelines, name them as .haystack-pipeline.yml
+
+version: ignore
+
+components: # define all the building-blocks for Pipeline
+ - name: DocumentStore
+ type: ElasticsearchDocumentStore
+ params:
+ host: localhost
+ embedding_field: question_emb
+ embedding_dim: 384
+ excluded_meta_data:
+ - question_emb
+ similarity: cosine
+ - name: Retriever
+ type: EmbeddingRetriever
+ params:
+ document_store: DocumentStore # params can reference other components defined in the YAML
+ embedding_model: sentence-transformers/all-MiniLM-L6-v2
+ scale_score: False
+ - name: CSVConverter
+ type: CsvTextConverter
+
+pipelines:
+ - name: indexing
+ nodes:
+ - name: CSVConverter
+ inputs: [File]
+ - name: Retriever
+ inputs: [ CSVConverter ]
+ - name: DocumentStore
+ inputs: [ Retriever ]
+```
+
+# Motivation
+
+Using YAML pipeline description it's not possible to describe a CSV FAQ indexing pipeline that takes CSV files as input
+containing questions and answers and index them. It's a basic usage that still requires coding.
+
+As we are presenting a tutorial `Utilizing Existing FAQs for Question Answering` it would be great to have this basic
+node so that anyone can quickly run an FAQ Question Answering pipeline using only a YAML description and import their
+CSV through the REST API.
+
+# Detailed design
+
+I've added a new node: **`CsvTextConverter`**. It takes a file input, parses it as an FAQ CSV file having `question` and `answer` columns,
+and outputs `Document`s.
+
+For now the node is very simple: it can only handle a fixed CSV format and no other tabular data. It also
+can't produce documents that are not of type `text`. These shortcomings can be addressed in later enhancements.
+
+# Drawbacks
+
+We could consider that developing this custom node is easy and a good way to learn Haystack,
+but casual users shouldn't need to know this much before being able to index CSV files.
+
+# Alternatives
+
+Didn't consider any other design.
+
+# Adoption strategy
+
+It doesn't introduce any breaking change; any users with existing FAQ pipelines would be able to use the official nodes instead
+of their existing ones.
+
+# How we teach this
+
+This may require updating this tutorial [Utilizing Existing FAQs for Question Answering](https://haystack.deepset.ai/tutorials/04_faq_style_qa)
+and documenting this node.
diff --git a/testbed/deepset-ai__haystack/proposals/text/3558-embedding_retriever.md b/testbed/deepset-ai__haystack/proposals/text/3558-embedding_retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c76f771793151d3175b9d032c281a3bff0a556f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3558-embedding_retriever.md
@@ -0,0 +1,131 @@
+- Start Date: 2022-11-11
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3558
+- Github Issue:
+
+> ⚠️ Superseded by https://github.com/deepset-ai/haystack/blob/main/proposals/text/5390-embedders.md
+
+ # Summary
+
+- Current EmbeddingRetriever doesn't allow Haystack users to provide new embedding methods and is
+ currently constricted to farm, transformers, sentence transformers, OpenAI and Cohere based
+ embedding approaches. Any new encoding methods need to be explicitly added to Haystack
+ and registered with the EmbeddingRetriever.
+
+
+- We should allow users to easily plug-in new embedding methods to EmbeddingRetriever. For example, a Haystack user should be able to
+ add custom embeddings without having to commit additional code to Haystack repository.
+
+ # Basic example
+ EmbeddingRetriever is instantiated with:
+
+ ``` python
+ retriever = EmbeddingRetriever(
+ document_store=document_store,
+ embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
+ model_format="sentence_transformers",
+ )
+ ```
+- The current approach doesn't provide a pluggable abstraction point of composition but
+ rather attempts to satisfy various embedding methodologies by having a lot of
+ parameters which keep ever expanding.
+
+
+- The new approach allows creation of the underlying embedding mechanism (EmbeddingEncoder)
+ which is then in turn plugged into EmbeddingRetriever. For example:
+
+ ``` python
+ encoder = SomeNewFancyEmbeddingEncoder(api_key="asdfklklja",
+ query_model="text-search-query",
+ doc_model="text-search-doc")
+ ```
+
+- EmbeddingEncoder is then used for the creation of EmbeddingRetriever. EmbeddingRetriever
+ init method doesn't get polluted with additional parameters as all of the peculiarities
+  of a particular encoder methodology are contained in its abstraction layer.
+
+ ``` python
+ retriever = EmbeddingRetriever(
+ document_store=document_store,
+ encoder=encoder
+ )
+ ```
+
+ # Motivation
+
+- Why are we doing this? What use cases does it support? What is the expected outcome?
+
+ We could certainly keep the current solution as is; it does implement a decent level
+ of composition/decoration to lower coupling between EmbeddingRetriever and the underlying
+ mechanism of embedding (sentence transformers, OpenAI, etc). However, the current mechanism
+ in place basically hard-codes available embedding implementations and prevents our users from
+ adding new embedding mechanism by themselves outside of Haystack repository. We also might
+ want to have a non-public dC embedding mechanism in the future. In the current design a non-public
+ dC embedding mechanism would be impractical. In addition, the more underlying implementations we
+ add we'll continue to "pollute" EmbeddingRetriever init method with more and more parameters.
+ This is certainly less than ideal long term.
+
+
+- EmbeddingEncoder classes should be subclasses of BaseComponent! As subclasses of BaseComponent,
+ we can use them outside the EmbeddingRetriever context in indexing pipelines, generating the
+ embeddings. We are currently employing a kludge of using Retrievers which is quite counter-intuitive
+ and confusing for our users.
+
+
+- EmbeddingEncoder classes might sound overly complicated, especially with a distinguishing mechanism
+  name prepended (i.e. CohereEmbeddingEncoder). Therefore, we'll adopt Embedder
+ naming scheme, i.e. CohereEmbedder, SentenceTransformerEmbedder and so on.
+
+ # Detailed design
+
+- Our new EmbeddingRetriever would still wrap the underlying encoding mechanism in the form of
+ _BaseEmbedder. _BaseEmbedder still needs to implement methods:
+ - embed_queries
+ - embed_documents
+
+
+- The new design approach differs in the creation of EmbeddingRetriever - rather than hiding the underlying encoding
+ mechanism one could simply create the EmbeddingRetriever with a specific encoder directly. For example:
+
+ ```
+ retriever = EmbeddingRetriever(
+ document_store=document_store,
+ encoder=OpenAIEmbedder(api_key="asdfklklja", model="ada"),
+ #additional EmbeddingRetriever-abstraction-level parameters
+ )
+ ```
+
+- If the "two-step approach" of EmbeddingRetriever initialization is no longer the ideal solution (issues with current
+ schema generation and loading/saving via YAML pipelines) we might simply add the EmbeddingRetriever
+ class for every supported encoding approach. For example, we could have OpenAIEmbeddingRetriever, CohereEmbeddingRetriever,
+ SentenceTransformerEmbeddingRetriever and so on. Each of these retrievers will delegate the bulk of the work to an
+ existing EmbeddingRetriever with a per-class-specific Embedder set in the class constructor (for that custom
+  encoding part). We'll get the best of both worlds. Each EmbeddingRetriever will have only the relevant primitive
+  parameters for the **init()** constructor; the underlying EmbeddingRetriever attribute in EmbeddingRetriever
+ will handle most of the business logic of retrieving, yet each retriever will use an appropriate per-class-specific
+ Embedder for the custom encoding part.
+
+
+
+ # Drawbacks
+- The main shortcomings are:
+ - The "two-step approach" in EmbeddingRetriever initialization
+ - Likely be an issue for the current schema generation and loading/saving via YAML pipelines (see solution above)
+    - It is an API breaking change so it'll require code updates for all EmbeddingRetriever usage both in our codebase and for Haystack users
+ - Can only be done in major release along with other breaking changes
+
+ # Alternatives
+
+ We could certainly keep everything as is :-)
+
+ # Adoption strategy
+- As it is a breaking change, we should implement it for the next major release.
+
+ # How do we teach this?
+- This change would require only a minor change in documentation.
+- The concept of embedding retriever remains, just the mechanics are slightly changed
+- All docs and tutorials need to be updated
+- Haystack users are informed about a possibility to create and use their own embedders for embedding retriever.
+ # Unresolved questions
+
+ Optional, but suggested for first drafts. What parts of the design are still
+ TBD?
diff --git a/testbed/deepset-ai__haystack/proposals/text/3665-prompt-node.md b/testbed/deepset-ai__haystack/proposals/text/3665-prompt-node.md
new file mode 100644
index 0000000000000000000000000000000000000000..fccf7527c93315f5084fa125fde6ce3f2a5f4fbf
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3665-prompt-node.md
@@ -0,0 +1,445 @@
+- Start Date: 2022-12-04
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3665
+- Github Issue: https://github.com/deepset-ai/haystack/issues/3306
+- Example Colab notebook: https://colab.research.google.com/drive/1bOIxb8cnpTrpMtTSBArY9FJlL59Ar4K_?usp=sharing
+
+ # Summary
+
+- The PromptNode class is the central abstraction in Haystack's large language model (LLM) support.
+ Such a component supports multiple NLP tasks out-of-the-box. PromptNode allows users to
+ perform multiple tasks, such as summarization, question answering, question generation etc. using a single,
+ unified model within the Haystack framework.
+
+
+- One of the benefits of PromptNode is that it allows users to define and add additional tasks
+ that are supported by the model. This can enable users to extend the capabilities of the model and
+ to use it for a wider range of NLP tasks within the Haystack ecosystem.
+
+
+ # Basic example
+
+ PromptNode is instantiated with the underlying LLM model and prompted by using natural language:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ pn = PromptNode(model_name_or_path="google/flan-t5-base")
+ pn("What is the capital of Germany?")
+
+ ----------------------------------------------------------------------------
+ ['berlin']
+ ```
+
+ # Motivation
+
+
+- The use of large language models (LLMs) has become increasingly popular in natural language
+ processing (NLP) due to their ability to capture complex and nuanced patterns in language.
+ PromptNode allows users to leverage the power of LLMs in the Haystack ecosystem, and
+ to perform multiple NLP tasks using a single, unified model. This provides a flexible and efficient
+ tool for NLP in Haystack, and can enable users to improve the performance and reliability of their applications.
+
+
+- Modern LLMs support hundreds if not thousands of tasks. Aside from PromptNode we'll define prompt templates for
+ dozen or so most popular NLP tasks and allow users to register prompt templates for additional tasks. The
+ extensible and modular approach would allow users to extend the capabilities of the model and to use
+ it for a wider range of NLP tasks within the Haystack ecosystem. Prompt engineers would define templates
+ for each NLP task and register them with the PromptNode. The burden of defining the best templates for each task
+ would be on the prompt engineers and not on the users.
+
+
+- The use of templates to define NLP tasks can make it easier for users to use PromptNode, as
+ they do not need to know the details of how the model works or how to define tasks for it. This can
+ reduce the learning curve and make it easier for users to get started with PromptNode and
+ to leverage the power of LLMs in Haystack.
+
+
+- The extensible and modular approach of PromptNode allows users to easily add support for
+ additional templates, even on-the-fly, which can enable them to extend the capabilities of the model and to use it for
+ a wider range of NLP tasks. This can provide users with more flexibility and control over the model,
+ and can enable them to tailor it to their specific needs and applications.
+
+ # Detailed design
+
+- The PromptNode class is the most important abstraction in Haystack's large language model (LLM) support.
+ In addition to PromptNode class, we'll also define a set of prompt templates for the most popular NLP tasks.
+
+
+- NLP prompt templates will be represented by `PromptTemplate` class.
+
+ ``` python
+ class PromptTemplate(BaseTemplate):
+
+ name: str
+ prompt_text: str
+ input_variables: List[str]
+
+ ```
+
+ PromptNode would, out-of-the-box, support 10-20 default NLP tasks defined by PromptTemplate instances. However, it would
+ allow registering additional templates with PromptNode.
+
+
+- The prompt templates for default tasks (question-answering, question-generation, summarization etc.) could be examined by the user
+ using `get_prompt_templates_names` class method of the PromptNode. For example:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ PromptNode.get_prompt_templates_names()
+
+ ----------------------------------------------------------------------------
+ ['question-answering',
+ 'question-generation',
+ 'conditioned-question-generation',
+ 'summarization',
+ 'question-answering-check']
+ ```
+
+
+- PromptNode supports natural language prompting (using `prompt` method) by specifying prompt template method parameter. For example:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ pn = PromptNode(model_name_or_path="google/flan-t5-base")
+ pn.prompt("question-generation", documents=["Berlin is the capital of Germany."])
+
+ ----------------------------------------------------------------------------
+ ['What is the capital of Germany?']
+ ```
+
+- PromptNode supports selecting a particular default template for a certain task (e.g. question-generation) and then subsequently
+ using the selected template until user changes the current template. For example:
+
+ ``` python
+ qa = pn.use_prompt_template("deepset/question-generation-v2")
+ qa(documents=["Berlin is the capital of Germany."])
+
+ ----------------------------------------------------------------------------
+ ['What is the capital of Germany?']
+ ```
+
+- The addition of new prompt templates is supported by the `add_prompt_template` method. For example:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ PromptNode.add_prompt_template(PromptTemplate(name="sentiment-analysis",
+ prompt_text="Please give a sentiment for this context. Answer with positive, "
+ "negative or neutral. Context: $documents; Answer:",
+ input_variables=["documents"]))
+ PromptNode.get_prompt_templates_names()
+
+ ----------------------------------------------------------------------------
+ ['question-answering',
+ 'question-generation',
+ 'conditioned-question-generation',
+ 'summarization',
+ 'question-answering-check',
+ 'sentiment-analysis']
+ ```
+
+- Users can inspect registered prompt templates with two class methods: `get_prompt_templates_names` and `get_prompt_templates`. The first
+ method, as we have seen, simply lists the names of the supported templates while the second method returns the list of `PromptTemplate`
+ instances, in readable format, allowing users to inspect the actual prompt template used and the templates input parameters.
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ PromptNode.get_prompt_templates()
+
+ ----------------------------------------------------------------------------
+ [PromptTemplate(name="sentiment-analysis",
+ prompt_text="Please give a sentiment for this context. Answer with positive, "
+ "negative or neutral. Context: $documents; Answer:",
+ input_variables=["documents"], ...]
+ ```
+
+
+
+- However, aside from existing templates, users should also be able to use "on-the-fly" templates without registering them first. For example:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ pn = PromptNode(model_name_or_path="google/flan-t5-base")
+ prompt_template = PromptTemplate(name="sentiment-analysis",
+ prompt_text="Please give a sentiment for this context. "
+ "Answer with positive, negative or neutral. Context: $documents; Answer:",
+ input_variables=["documents"])
+ pn.prompt(prompt_template, documents=["I really enjoyed the recent movie."])
+
+ ----------------------------------------------------------------------------
+ ['positive']
+ ```
+ This "on-the-fly" approach might be handy if users simply want to try things out.
+
+
+- Therefore, the most central API method of the PromptNode class would be the `prompt` method with the following signature:
+ ``` python
+ def prompt(self, prompt_template: Union[str, PromptTemplate] = None, *args, **kwargs) -> List[str]:
+ ```
+
+
+- PromptNode class `__init__` constructor, aside from the `model_name_or_path` parameter would also have a
+ `prompt_template` parameter which would serve as the current and default template of the PromptNode.
+
+- ``` python
+ def __init__(self, model_name_or_path: str = "google/flan-t5-base", prompt_template: Union[str, PromptTemplate] = None):
+ ```
+
+ If the `prompt_template` is not specified in the `PromptNode` init method then the user is required to specify the
+ template in the prompt method:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ pn = PromptNode(model_name_or_path="google/flan-t5-base")
+ pn.prompt("question-generation", documents=["Berlin is the capital of Germany."])
+
+ ----------------------------------------------------------------------------
+ ['What is the capital of Germany?']
+ ```
+
+ Otherwise, when the `PromptNode` is initialized with a prompt template user can invoke the `PromptNode` directly
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ pn = PromptNode(model_name_or_path="google/flan-t5-base", prompt_template="question-generation")
+ pn(documents=["Berlin is the capital of Germany."])
+
+ ----------------------------------------------------------------------------
+ ['What is the capital of Germany?']
+ ```
+
+- Template parameters verification
+
+ All template input parameters will be verified to match the template definition and the corresponding runtime
+ parameters for the input variables will be checked for type and value. For example:
+
+ ``` python
+ from haystack.nodes.llm import PromptNode
+ on = PromptNode(model_name_or_path="google/flan-t5-base")
+ on.prompt("question-generation", some_unknown_param=["Berlin is the capital of Germany."])
+
+ ----------------------------------------------------------------------------
+ ValueError Traceback (most recent call last)
+ in
+ 1 # tasks parameters are checked
+ ----> 2 sa(some_param=[Document("Berlin is the capital of Germany.")])
+
+ 2 frames
+ /usr/local/lib/python3.8/dist-packages/haystack/nodes/llm/multi_task.py in __call__(self, *args, **kwargs)
+ 34 if set(template_dict.keys()) != set(self.input_variables):
+ 35 available_params = set(list(template_dict.keys()) + list(set(kwargs.keys())))
+ ---> 36 raise ValueError(f"Expected prompt params {self.input_variables} but got {list(available_params)}")
+ 37
+ 38 template_dict["prompt_template"] = self.prompt_text
+
+ ValueError: Expected prompt params ['documents'] but got ['some_unknown_param']
+ ```
+
+- Pipelines
+
+ Even though we can use PromptNode directly its real power lies in using pipelines and Haystack. For example, we
+ can retrieve documents from the document store using the query and then inject the retrieved documents into documents
+ as a parameter to the selected PromptNode template. For example:
+
+ ``` python
+ from haystack.pipelines import PromptNode
+ top_k = 3
+ query = "Who are the parents of Arya Stark?"
+ retriever = EmbeddingRetriever(...)
+ pn = PromptNode(model_name_or_path="google/flan-t5-base", prompt_template="question-answering")
+
+ pipe = Pipeline()
+ pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
+ pipe.add_node(component=pn, name="prompt_node", inputs=["Retriever"])
+
+ output = pipe.run(query=query,
+ params={"Retriever": {"top_k": top_k}},
+ questions=[query for n in range(0, top_k)],
+ #documents parameter we need for this task will be automatically populated by the retriever
+ )
+
+ output["results"]
+ ```
+
+ - However, we are still not utilizing the full power of Haystack pipelines. What if we could use more than
+ one PromptNode in the pipeline? Perhaps we could first retrieve documents from the retriever, pass it
+ to first PromptNode that will generate questions from these documents, and then add a
+ second PromptNode component that will answer those generated questions given the documents as the
+ context. Here is how we can do exactly that:
+
+ ``` python
+ top_k = 3
+ query = "Who are the parents of Arya Stark?"
+ retriever = EmbeddingRetriever(...)
+ model = PromptModel(model_name_or_path="google/flan-t5-small")
+
+ qg = PromptNode(prompt_template="question-generation", prompt_model=model, output_variable="questions")
+ qa = PromptNode(prompt_template="question-answering", prompt_model=model)
+
+ pipe = Pipeline()
+ pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
+ pipe.add_node(component=qg, name="qg", inputs=["Retriever"])
+ pipe.add_node(component=qa, name="qa", inputs=["qg"])
+
+ result = pipe.run(query=query)
+
+ print(result["results"])
+
+ ```
+
+ The above example shows how we can use PromptNode in a pipeline and how we can bind the results of one
+ PromptNode to the input of another PromptNode. The `output_variable` parameter used in the constructor of the
+ question answering node, and assigned value `questions` indicates that variable `questions` could be resolved by any
+ downstream pipeline node. In this particular case, the output of the question generation node will be passed to downstream
+ nodes, and answering node will use that `questions` variable to inject its stored value into the `questions` parameter of
+ the question answering template.
+
+ A careful reader of this proposal will also notice that we are using the same PromptModel instance for both PromptNodes.
+ This is done mainly for reuse as the PromptModel instance could be a locally run LLM and we don't want to load it
+ multiple times.
+
+ As LLMs are very resource intensive we can also envision a scenario where we would like to use a remote LLM service.
+ In such cases we can use multiple instances of a PromptNode in a pipeline directly thus bypassing PromptModel altogether.
+
+
+ - Pipeline YAML config file
+
+ Let's recreate the above pipeline using a YAML config file and a declarative way of defining a pipeline.
+
+ ```yaml
+
+ components:
+
+ # can go in pipeline
+ - name: prompt_node
+ params:
+ prompt_template: template
+ model_name_or_path: model
+ output_variable: "questions"
+ type: PromptNode
+
+ # can go in pipeline
+ - name: prompt_node_2
+ params:
+ prompt_template: "question-answering"
+ model_name_or_path: deepset/model-name
+ type: PromptNode
+
+ # not in pipeline - only needed if you're reusing the model across multiple PromptNode in a pipeline
+ # and hidden from users in the Python beginner world
+ - name: model
+ params:
+ model_name_or_path: google/flan-t5-xl
+ type: PromptModel
+
+ # not in pipeline
+ - name: template
+ params:
+ name: "question-generation-v2"
+ prompt_text: "Given the following $documents, please generate a question. Question:"
+ input_variables: documents
+ type: PromptTemplate
+
+ pipelines:
+ - name: question-generation-answering-pipeline
+ nodes:
+ - name: EmbeddingRetriever
+ inputs: [Query]
+ - name: prompt_node
+ inputs: [EmbeddingRetriever]
+ - name: prompt_node_2
+ inputs: [prompt_node]
+ ```
+ First of all, notice how we reuse the resource heavy PromptModel instance across multiple PromptNode instances. And
+ although we could have used already registered `question-generation` prompt template, we decided to define a new one
+ called `question-generation-v2` and as such set it as the default template for the first PromptNode. We also defined
+ the output of the first PromptNode as `questions` and used that variable in the second PromptNode.
+
+ In conclusion, we can see that the YAML config file is a mirror image of the previous code centric pipeline
+ example and also a very powerful way of defining a pipeline.
+
+
+
+ - Default tasks/prompts to be added to PromptNode
+
+ [Muffin]:
+ - Summarization
+ - Natural Language Inference
+ - Multiple-Choice QA
+ - Translation
+ - Sentiment Analysis
+ - Extractive QA
+ - Structured Data to Text
+ - Coreference Resolution
+ - Code Repair
+ - Code Error Generation
+ - Dialogue Context Generation
+ - Closed-Book QA
+ - Next Sentence Prediction
+ - Paraphrasing Identification
+ - Conversational Question Answering
+ - Topic Classification
+ - Mathematical QA
+ - Dialog Next Turn Prediction
+ - Grammatical Acceptability
+ - Punctuation fixing
+
+ [T0-SF]:
+ - Adversarial QA
+ - Question Generation
+ - Commonsense Reasoning
+ - Title Generation
+ - Dialogue Turn Prediction
+ - Predict Span Indices
+ - Context Generation
+
+ [NIV2]:
+ - Program Execution
+ - Text Matching
+ - Toxic Language Detection
+ - Cause Effect Classification
+ - Information Extraction
+ - Textual Entailment
+ - Wrong Candidate Generation
+ - Named Entity Recognition
+ - Commonsense Classification
+ - Fill-in-the-blank
+ - Text Completion
+ - Sentence Composition
+ - Question Understanding
+
+ [CoT Reasoning]:
+ - Explanation Generation
+ - Generate Question And Answer
+ - Grade School Math Word Problems
+ - Algebraic Question Answering
+ - Common Sense Reasoning Over Entities
+ - Common Sense Reasoning For QA
+ - Passage Based Question Answering
+ - Sense-Making And Explanation
+
+ # Drawbacks
+- One potential drawback of PromptNode is that it may require a significant amount of computational resources
+ to use. This may limit its use in applications or environments where there are constraints on the available hardware
+ or software resources.
+
+
+- Due to current pipeline design limitations PromptTemplate has to be a subclass of BaseComponent. This might slightly
+ confuse some users who are already familiar with Haystack components. We will mitigate this issue in subsequent releases
+ as we refactor the pipeline design. All in all, PromptTemplate will be a thin class with minimal inheritance signature from some base class.
+
+
+
+ # Alternatives
+
+- One alternative to PromptNode is to continue to use separate models for each NLP task in Haystack. This
+ can enable users to tailor the model to the specific requirements of each task, and to potentially improve the
+ performance of the model for that task by additional fine-tuning or model adaptation via GPL. However, using separate
+ models may require these complex and computationally intensive training and deployment processes, and may not be as
+ efficient or flexible as using a single, unified model.
+
+ # Adoption strategy
+- This is not a breaking change proposal and we should implement it immediately.
+
+ # How do we teach this?
+- This change would require change in documentation.
+- We can provide examples of how to use PromptNode in Haystack pipelines via tutorials.
+- Docs and tutorials need to be updated
diff --git a/testbed/deepset-ai__haystack/proposals/text/3695-demo-repository.md b/testbed/deepset-ai__haystack/proposals/text/3695-demo-repository.md
new file mode 100644
index 0000000000000000000000000000000000000000..bbfeed38b379490707fad25d1222e6f986390615
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3695-demo-repository.md
@@ -0,0 +1,77 @@
+- Start Date: 2022-12-12
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3695
+- Github Issue: (if available, link the issue containing the original request for this change)
+
+# Summary
+
+A new git repository is created to host NLP applications showcasing Haystack's features.
+
+# Basic example
+
+A git repository was already created on Github as an example:
+
+https://github.com/deepset-ai/haystack-demos
+
+# Motivation
+
+NLP applications showcasing Haystack's capabilities can be an invaluable learning resource
+for its users, but at this moment we don't fully take advantage of the only demo we have
+as a documentation source.
+
+This proposal aims at overcoming that limitation in two ways:
+- Define better requirements for a demo application so that users can learn from it.
+- Make it easier to add more demo applications showcasing Haystack.
+
+# Detailed design
+
+Every demo has a descriptive name that will be used as its identifier.
+
+Every demo lives in a dedicated folder named after its identifier at the root of the repo, and
+provides all the resources needed to understand the code, run the application locally or deploy it
+remotely on a server.
+
+Every demo provides a README.md file containing the following information:
+- A brief description of the application and what its goal is.
+- Explicit mention of which NLP use case is implemented: for example "QA", or "Document Retrieval".
+- Detailed instructions about how to run the application locally.
+- Any hardware requirement and the limitations when not provided (for example, a GPU device).
+- How to modify and test the code, and how to contribute changes.
+
+The code of a demo application should be tested whenever possible, and at least some of the
+tests should be able to run in the repo CI system. Every demo has a dedicated Workflow defined
+in a file named after its identifier. The workflow runs only when files in the demo folder are
+modified.
+
+In case the CI is needed for continuous deployment, or for building artifacts, a demo can have
+more than one workflow file defined, named after its identifier plus a descriptive suffix, for
+example: `my_demo_identifier.yml` for tests, `my_demo_identifier_docker.yml` for building a
+Docker image, `my_demo_identifier_deploy.yml` for continuous delivery.
+
+# Drawbacks
+
+- The code of the existing demo would be removed from Haystack and potentially become harder to
+ find for existing contributors.
+- The proposed design dictates a list of new requirements for a demo that will take time to
+ implement.
+
+# Alternatives
+
+- Leave things as they are
+- Implement the design proposal to a subfolder of Haystack's git repository
+
+# Adoption strategy
+
+Adoption will be mostly driven by communicating the changes to the community and monitoring the
+traffic in the new Github repository: interacting with the existing demo will not be affected
+but accessing the code would be.
+
+# How we teach this
+
+- A link to the demo repository will be added to the web page of the [demo itself](https://haystack-demo.deepset.ai/).
+- Haystack's README and documentation will mention where to find the code for the demos.
+- [Haystack Home](https://haystack.deepset.ai) will host a whole section dedicated to Haystack demos
+ (detailing the aforementioned section is out of scope for this proposal).
+
+# Unresolved questions
+
+N/A.
diff --git a/testbed/deepset-ai__haystack/proposals/text/3784-shaper.md b/testbed/deepset-ai__haystack/proposals/text/3784-shaper.md
new file mode 100644
index 0000000000000000000000000000000000000000..52a057dcd6f18353ed988a611ba9f5b6149f9a9a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3784-shaper.md
@@ -0,0 +1,272 @@
+- Title: Shaper
+- Decision driver: Vladimir
+- Start Date: 2022-12-29
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3784/
+
+# Summary
+
+Input/Output Shaper (Shaper) is a new pipeline component that can invoke arbitrary, registered functions, on the
+invocation context (query, documents etc.) of a pipeline and pass the new/modified variables further down the pipeline.
+
+# Basic example
+
+In the following example, we'll use Shaper to add a new variable `questions` to the invocation context.
+`questions` is a copy of the query variable. This functionality of Shaper is useful when we simply want to
+rename a variable in the invocation context e.g. in cases where the PromptNode template is expecting a variable
+'questions' rather than 'query'.
+
+
+```python
+
+ from haystack import Pipeline, Document
+
+ with open("tmp_config.yml", "w") as tmp_file:
+ tmp_file.write(
+ f"""
+ version: ignore
+ components:
+ - name: shaper
+ params:
+ inputs:
+ query:
+ output: questions
+ type: Shaper
+ pipelines:
+ - name: query
+ nodes:
+ - name: shaper
+ inputs:
+ - Query
+ """
+ )
+ pipeline = Pipeline.load_from_yaml(path="tmp_config.yml")
+ result = pipeline.run(
+ query="What can you tell me about Berlin?",
+ documents=[Document("Berlin is an amazing city."), Document("I love Berlin.")],
+ )
+ assert result
+ # query has been renamed to questions
+ assert isinstance(result["meta"]["invocation_context"]["questions"], str)
+
+
+```
+
+# Motivation
+
+We need Shaper to support the use cases where we want to easily add new variables to the pipeline invocation context.
+These new variables hold values which are a result of some arbitrary function invocation on the existing variables
+in the invocation context.
+
+Shaper is especially useful when combined with PromptNode(s). Aside from simply renaming variables to match
+the templates of PromptNodes, we can also use Shaper to add new variables to the invocation context. Often
+these new variables are the result of some arbitrary function invocation on the existing variables in the
+invocation context.
+
+The original idea for Shaper is related to the question answering use case using PromptNode. In QA, the query string variable
+passed to a pipeline run method needs to be expanded to a list of strings with the list size matching the size of the
+documents list. Therefore, we can use the query as the question to pose to all the documents in the documents list.
+
+The expected outcome of using Shaper is that we can easily add new variables to the invocation context so they can
+match the prompt templates of PromptNodes. Multiple Shaper components can be used in a pipeline to modify the
+invocation context as needed.
+
+
+# Detailed design
+
+The Shaper component is most often defined in pipelines YAML file. The YAML component definition consists of the
+params block:
+
+```yaml
+ components:
+ - name: shaper
+ params:
+ inputs:
+ query:
+ func: expand
+ output: questions
+ params:
+ expand_target: query
+ size:
+ func: len
+ params:
+ - documents
+ documents:
+ func: concat
+ params:
+ docs: documents
+ delimiter: " "
+ type: Shaper
+ pipelines:
+ - name: query
+ nodes:
+ - name: shaper
+ inputs:
+ - Query
+```
+
+The params block consists of the inputs list. Each item in the inputs list is a dictionary with the key being the
+invocation context variable that we want to modify.
+
+In the example above, we have two items in the inputs list. The first item is a dictionary with the key `query` and the
+second item is a dictionary with the key `documents`.
+
+For the query variable, we want to invoke the function `expand` and store the result in the variable `questions`.
+The `expand` function takes two keyword parameters: `expand_target` and `size`. The `expand_target` parameter is the
+name of the variable in the invocation context that we want to expand. The `size` parameter is a result of the `len`
+function invocation on the variable `documents`.
+
+For the documents variable, we want to invoke the function `concat` and store the result in the same variable.
+Therefore, after the invocation, the documents variable will hold a result of `concat` function invocation while
+we'll also have a new variable `questions` in the invocation context. The questions variable will hold a result of
+`expand` function invocation.
+
+The important thing to note here is that we can invoke functions with both keyword and positional parameters. Function
+`len` is an example of a function that takes non-keyword positional parameters. The `concat` and `expand` function
+take keyword parameters. These functions can also be invoked with positional parameters but that is not recommended.
+
+
+### Default parameters
+
+The Shaper component can also be configured with default parameters. Default parameters are used when we
+don't specify the parameters for a function invocation. The default parameters are specified in the function definition.
+
+For example, in the YAML snippet definition below, we have a function `expand` that takes two keyword parameters:
+`expand_target` and `size`. However, we haven't specified either of these parameters in the YAML config. This is
+possible because we assume that the first parameter is always the variable we want to invoke the function on. In this
+case, the variable `query`. The second parameter is the `size` of the list we want to expand the variable to. Here we
+have also defined a helper function in Shaper called `expand:size` that calculates the default value of
+this parameter - `len(documents)`.
+
+Therefore, the `expand` function, described below, will be invoked with the following parameters: `query`
+and `len(documents)`
+
+```yaml
+ components:
+ - name: shaper
+ params:
+ inputs:
+ query:
+ func: expand
+ output: questions
+ type: Shaper
+ pipelines:
+ - name: query
+ nodes:
+ - name: shaper
+ inputs:
+ - Query
+
+```
+
+We need the "default parameter" functionality to support YAML file definitions that are more concise and less
+error-prone.
+
+
+### Omitting output parameter
+
+The output parameter is optional. If it is omitted, the result of the function invocation will be stored in
+the corresponding input variable. In the example below, the output of expand function will be stored in the
+query variable.
+
+```yaml
+ components:
+ - name: shaper
+ params:
+ inputs:
+ query:
+ func: expand
+ type: Shaper
+ pipelines:
+ - name: query
+ nodes:
+ - name: shaper
+ inputs:
+ - Query
+
+```
+
+### Order of function invocation
+
+The order of function invocation is important. The functions are invoked in the order they are defined in the YAML.
+In the example below, we have two input variables: `query` and `documents`. The `query` variable is expanded to a
+list of strings and stored in the variable `questions`. The `documents` variable is then contracted and the
+variable `questions` is immediately used as the `num_tokens` keyword parameter to the `concat` function.
+
+```yaml
+ components:
+ - name: shaper
+ params:
+ inputs:
+ query:
+ func: expand
+ output: questions
+ params:
+ expand_target: query
+ size:
+ func: len
+ params:
+ - documents
+ documents:
+ func: concat
+ output: documents
+ params:
+ docs: documents
+ delimiter: " "
+ num_tokens:
+ func: len
+ params:
+ - questions
+ type: Shaper
+ pipelines:
+ - name: query
+ nodes:
+ - name: shaper
+ inputs:
+ - Query
+```
+
+### Security
+
+The Shaper component prevents arbitrary code execution. There should be no dangerous `exec` or `eval` Python calls. Only
+the functions we have specified in the Shaper component are allowed to be invoked. The functions are specified in the
+constructor using immutable data structures.
+
+This security measure might be relaxed in the future to allow users to specify their own functions. However, this
+change will require a more thorough security review.
+
+# Drawbacks and other considerations
+
+Although a "normal use" of PromptNodes would not trigger a need for Shaper, there are cases where its
+use is necessary. In cases where we can only use pipeline definitions to configure the pipeline (via YAML),
+we need to use it.
+
+
+- Implementation and maintenance cost should not be high.
+- Shaper is not really useful outside of Haystack pipeline.
+- Shaper could turn out to be useful in other use cases as well - i.e. declarative pre/post processing.
+- Shaper integrates well with PromptNodes and other components.
+- No breaking changes to existing components.
+
+
+# Alternatives
+
+A better solution would likely be a more general run method for components. This would allow us to arbitrarily
+define the pipeline invocation context. However even in those cases we'll need to use Shaper to modify
+existing variables in invocation context as needed.
+
+# Adoption strategy
+
+Haystack users can start using Shaper in their pipelines immediately. There are no breaking changes to
+existing components or pipelines.
+
+
+# How we teach this
+
+We will need a docs update to teach users how to use Shaper. The docs will need to explain the
+motivation for Shaper, using Shaper and PromptNode examples. We also need to show the usage via tutorials.
+
+
+# Unresolved questions
+
+Optional, but suggested for first drafts. What parts of the design are still
+TBD?
diff --git a/testbed/deepset-ai__haystack/proposals/text/3875-table-cell.md b/testbed/deepset-ai__haystack/proposals/text/3875-table-cell.md
new file mode 100644
index 0000000000000000000000000000000000000000..a014ded77eb1fef9da7c2aef429f937c065ab107
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3875-table-cell.md
@@ -0,0 +1,238 @@
+- Title: TableCell Dataclass
+- Decision driver: Sebastian Lee
+- Start Date: 2023-01-17
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3875
+- Github Issue: https://github.com/deepset-ai/haystack/issues/3616
+
+# Summary
+
+When returning answers for a TableQA pipeline we would like to return the column and row index as the answer location
+within the table since the table is either returned as a list of lists in Haystack.
+This would allow users to easily look up the answer in the returned table to fetch the text directly from the table,
+identify the row or column labels for that answer, or generally perform operations on the table near or around the
+answer cell.
+
+# Basic Example
+
+When applicable, write a snippet of code showing how the new feature would be used.
+```python
+import pandas as pd
+from haystack.nodes import TableReader
+from haystack import Document
+
+data = {
+ "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
+ "age": ["58", "47", "60"],
+ "number of movies": ["87", "53", "69"],
+ "date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
+}
+table_doc = Document(content=pd.DataFrame(data), content_type="table")
+reader = TableReader(model_name_or_path="google/tapas-base-finetuned-wtq", max_seq_len=128)
+prediction = reader.predict(query="Who was in the most number of movies?", documents=[table_doc])
+answer = prediction["answers"][0]
+
+# New feature
+# answer.context -> [["actor", "age", "number of movies"], ["Brad Pitt",...], [...]]
+# answer.offsets_in_context[0] -> (row=1, col=1)
+print(answer.context[answer.offsets_in_context[0].row][answer.offsets_in_context[0].col])
+```
+
+# Motivation
+
+## Why do we need this feature?
+To allow users to easily look up the answer cell in the returned table to fetch the answer text
+directly from the table, identify the row or column labels for that answer, or generally perform operations on the table
+near or around the answer cell.
+
+Currently, we return the location of the answer in the **linearized** version of the table, so we can use the
+`Span` dataclass. The `Span` dataclass is reproduced below:
+```python
+@dataclass
+class Span:
+ start: int
+ end: int
+ """
+ Defining a sequence of characters (Text span) or cells (Table span) via start and end index.
+ For extractive QA: Character where answer starts/ends
+ For TableQA: Cell where the answer starts/ends (counted from top left to bottom right of table)
+
+ :param start: Position where the span starts
+ :param end: Position where the span ends
+ """
+```
+This is inconvenient for users because they would need to know how the table is linearized (column major or row major)
+so they could reconstruct the column and row indices of the answer before they could locate the answer cell in the table.
+
+## What use cases does it support?
+Some examples are already stated above but to recap, to easily perform operations on the table near or around the answer cell.
+
+## What's the expected outcome?
+The addition of a new dataclass called `TableCell` that would look like
+```python
+@dataclass
+class TableCell:
+ row: int
+ col: int
+ """
+ Defining a table cell via the row and column index.
+
+ :param row: Row index of the cell
+ :param col: Column index of the cell
+ """
+```
+
+# Detailed design
+
+**New terminology:** `TableCell`, the new name for the dataclass to
+store the column and row index of the answer cell.
+
+**Basic Example:** [Above Basic Example](#basic-example)
+
+## Code changes
+- Addition of `TableCell` dataclass to https://github.com/deepset-ai/haystack/blob/main/haystack/schema.py
+```python
+@dataclass
+class TableCell:
+ row: int
+ col: int
+ """
+ Defining a table cell via the row and column index.
+
+ :param row: Row index of the cell
+ :param col: Column index of the cell
+ """
+```
+
+- Updating code (e.g. schema objects, classes, functions) that use `Span` to also support `TableCell` where appropriate.
+This includes:
+- Updating the `Answer` dataclass to support `TableCell` as a valid type for `offsets_in_document` and `offsets_in_context`
+```python
+@dataclass
+ class Answer:
+ answer: str
+ type: Literal["generative", "extractive", "other"] = "extractive"
+ score: Optional[float] = None
+ context: Optional[Union[str, List[List]]] = None
+ offsets_in_document: Optional[Union[List[Span], List[TableCell]]] = None
+ offsets_in_context: Optional[Union[List[Span], List[TableCell]]] = None
+ document_id: Optional[str] = None
+ meta: Optional[Dict[str, Any]] = None
+```
+- Similar to how we can return a list of `Span`s, we would allow a list of `TableCell`s to be returned to handle the case
+ when multiple `TableCell`s are returned to form a final answer.
+- Updating any functions that accept table answers as input to use the new `col` and `row` variables instead of `start` and `end` variables.
+This type of check for table answers is most likely already done by checking if the `context` is of type `pd.DataFrame`.
+- `TableReader` and `RCIReader` to return `TableCell` objects instead of `Span`.
+
+Changes related to the Edge Case/Bug below
+- Update `Document.content` and `Answer.context` to use `List[List]` instead of `pd.DataFrame`.
+- Update `TableReader` nodes to convert table from `List[List]` into `pd.DataFrame` before inputting to the model.
+
+## Edge Case/Bug
+Internally, Haystack stores a table as a pandas DataFrame in the `Answer` dataclass, which does not treat the column
+labels as the first row in the table.
+However, in Haystack's rest-api the table is converted into a list of lists format where the column labels are
+stored as the first row, which can be seen [here](https://github.com/deepset-ai/haystack/pull/3872), which is consistent
+with the `Document.to_dict()` method seen [here](https://github.com/deepset-ai/haystack/blob/6af4f14fe0d375a1ae0ced18930a9239401231c7/haystack/schema.py#L164-L165).
+
+This means that the current `Span` and (new) `TableCell` dataclass point to the wrong location when the table is
+converted to a list of lists.
+
+For example, the following code
+```python
+import pandas as pd
+from haystack import Document
+
+data = {
+ "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
+ "age": ["58", "47", "60"],
+ "number of movies": ["87", "53", "69"],
+ "date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
+}
+table_doc = Document(content=pd.DataFrame(data), content_type="table")
+span = (0, 0)
+print(table_doc.content.iloc[span]) # prints "brad pitt"
+
+dict_table_doc = table_doc.to_dict()
+print(dict_table_doc["content"][span[0]][span[1]]) # prints "actors"
+```
+
+We have decided to store the table internally as a list of lists to avoid this issue. See discussion starting [here](https://github.com/deepset-ai/haystack/pull/3875#discussion_r1088766318).
+
+# Drawbacks
+
+Look at the feature from the other side: what are the reasons why we should _not_ work on it? Consider the following:
+
+- What's the implementation cost, both in terms of code size and complexity?
+
+I don't believe this will require too much code change since we already check for Table like answers by checking if the
+returned context is of type string or pandas Dataframe.
+
+- Can the solution you're proposing be implemented as a separate package, outside of Haystack?
+
+Technically yes, but since it affects core classes like `TableReader`, and `RCIReader` it makes sense to implement in
+Haystack.
+
+- Does it teach people more about Haystack?
+
+It would update already existing documentation and tutorials of Haystack.
+
+- How does this feature integrate with other existing and planned features?
+
+This feature directly integrates and impacts the TableQA feature of Haystack.
+
+- What's the cost of migrating existing Haystack pipelines (is it a breaking change?)?
+
+Yes there are breaking changes that would affect end users.
+1. The way to access the offsets in returned Answers would be different.
+Following the deprecation policy we will support both `Span` and `TableCell` (toggled using a boolean flag)
+for 2 additional versions of Haystack.
+2. Tables in Haystack Documents and Answers will change from type pandas Dataframe to a list of lists.
+
+# Alternatives
+
+## What's the impact of not adding this feature?
+Requiring users to figure out how to interpret the linearized answer cell coordinates to reconstruct the row and column indices
+to be able to access the answer cell in the returned table.
+
+## Other designs
+1. Expand `Span` dataclass to have optional `col` and `row` fields. This would require a similar check as `TableCell`, but instead
+require checking for which of the elements are populated, which seems unnecessarily complex.
+```python
+@dataclass
+class Span:
+ start: int = None
+ end: int = None
+ col: int = None
+ row: int = None
+```
+2. Use the existing `Span` dataclass and put the row index and column index as the `start` and `end` respectively.
+This may be confusing to users since it is not obvious that `start` should refer to `row` and `end` should refer to `column`.
+```python
+answer_cell_offset = Span(start=row_idx, end=col_idx)
+```
+3. Provide a convenience function shown [here](https://github.com/deepset-ai/haystack/issues/3616#issuecomment-1361300067)
+to help users convert the linearized `Span` back to row and column indices. I believe this solution is non-ideal since it would
+require a user of the rest_api to access a python function to convert the linearized indices back into row and column indices.
+
+# Adoption strategy
+
+## How will the existing Haystack users adopt it?
+Haystack users would immediately experience this change once they update their installation of Haystack if they were using
+the TableQA reader. This would be a breaking change since it would change the `offsets_in_document` and
+`offsets_in_context` in the returned `Answer`. I'm not sure if there would be a straightforward way to write a migration
+script for this change.
+
+# How we teach this
+
+Would implementing this feature mean the documentation must be re-organized
+or updated? Does it change how Haystack is taught to new developers at any level?
+
+- The API docs for `TableCell` would need to be added.
+- The documentation page for [Table Question Answering](https://docs.haystack.deepset.ai/docs/table_qa) would need to be updated.
+- Update the [TableQA tutorial](https://github.com/deepset-ai/haystack-tutorials/blob/main/tutorials/15_TableQA.ipynb)
+to reflect that the `Span` is no longer linearized.
+
+# Unresolved questions
+
+No more unresolved questions.
diff --git a/testbed/deepset-ai__haystack/proposals/text/3925-mrkl-agent.md b/testbed/deepset-ai__haystack/proposals/text/3925-mrkl-agent.md
new file mode 100644
index 0000000000000000000000000000000000000000..05d18a6a57a0b73a212b968367e8ed5e7ce7a36f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3925-mrkl-agent.md
@@ -0,0 +1,380 @@
+- Title: MRKLAgent
+- Decision driver: @julian-risch (in close collaboration with @vblagoje )
+- Start Date: 2023-01-27
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/3925
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/3753
+
+# Summary
+The Agent class answers queries by choosing between different tools, which are implemented as pipelines or nodes.
+It uses a large language model (LLM) to generate a thought based on the query, choose a tool, and generate the input for the tool.
+Based on the result returned by an action/tool (used interchangeably), the Agent has two options.
+It can either stop if it knows the answer now or repeat the process of 1) thought, 2) action choice, 3) action input.
+
+The Agent can be used for questions containing multiple subquestions that can be answered step-by-step (Multihop QA).
+Combined with tools like the PythonRuntime or SerpAPIComponent we imagine for Haystack, the Agent can query the web and do calculations.
+
+We have a [notebook](https://colab.research.google.com/drive/1oJf4gxkokIazLN26NNt6XsewMvCmVrz8?usp=sharing) that demonstrates how to use an Agent with two tools: PythonRuntime and SerpAPIComponent. It requires API keys for OpenAI and SerpAPI.
+The notebook is based on the branch https://github.com/deepset-ai/haystack/compare/main...mrkl-pipeline (no pull request)
+
+# Basic example
+
+An example of an Agent could use two tools: a web search engine and a calculator.
+
+The query "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?" can be broken down into three steps:
+1) Searching the web for the name of Olivia Wilde's boyfriend
+2) Searching the web for the age of that boyfriend
+3) Calculating that age raised to the 0.23 power
+
+And the Agent would respond in the end with "Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896." A detailed walk-through follows below.
+
+
+# Motivation
+
+With an Agent, users can combine multiple LLMs and tools, so that they can build a truly powerful app. They can use an LLM in a loop to answer more complex questions than with ExtractiveQA or GenerativeQA. With an Agent and a tool for web search, Haystack is not limited to extracting answers from a document store or generating answers based on model weights anymore but it can use the knowledge it retrieves on-the-fly from the web. Thereby, the model's knowledge does not get outdated.
+
+In future, we envision that an Agent could use tools not only for retrieving knowledge but also for interacting with the world. For example, it could periodically skim through newly opened issues in Haystack's GitHub repository. If there is a question that can be answered based on documentation then the Agent could retrieve relevant pages from the documentation, generate an answer and post it as a first response to the issue.
+
+# Detailed design
+
+**Glossary**
+- Thought: First part of a prompt generated by Agent that serves to break down the query into a plan, for example, what part of the question needs to be answered first.
+- Action (or tool): Actions/tools are Haystack pipelines or nodes that the Agent can use to answer a question. We use tool and action interchangeably in this proposal until we decide on the best naming. Choosing a tool in each iteration is the center part of a prompt generated by Agent.
+- Action input: Last part of a prompt generated by an Agent. It serves as the input to a tool that the Agent uses to answer a question.
+- Observation: The output generated by a tool and sent back to the Agent.
+
+The Agent consists of a PromptNode that generates thoughts, chooses actions, and generates action inputs.
+Just like Haystack pipelines, an Agent can be loaded from a YAML file. That YAML file must also contain the tools of the Agent defined as pipelines or nodes.
+Tools need to be added to an Agent so that it can use them, just like nodes need to be added to pipelines.
+When a tool is added to an Agent, a description of the tools needs to be added so that the LLM knows when it is useful.
+
+A key functionality of the Agent is that it can act iteratively and use any of the pre-defined tools as many times as it wants based on the input query and the results returned from the tools used earlier. In every iteration, it chooses one of the tools and generates the input for that tool dynamically.
+An example application of this is MultiHopQA, where multiple subquestions need to be answered step-by-step.
+For the example query "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", the MRKLPipeline needs to answer several subquestions. Here is an example of a full transcript of the prompt input and generated output:
+
+```
+Answer the following questions as best as you can. You have access to the following tools:
+
+Search: useful for when you need to answer questions about current events. You should ask targeted questions
+Calculator: useful for when you need to answer questions about math
+
+Use the following format:
+
+Question: the input question you must answer
+Thought: you should always think about what to do
+Action: the action to take, should be one of [Search, Calculator]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can repeat N times)
+Thought: I now know the final answer
+Final Answer: the final answer to the original input question
+
+Begin!
+Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
+Thought: I need to do some research to answer this question.
+Action: Search
+Action Input: Olivia Wilde's boyfriend
+Observation: First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016.
+Thought: I need to find out his age
+Action: Search
+Action Input: Jason Sudeikis age
+Observation: 47 years
+Thought: I need to raise it to the 0.23 power
+Action: Calculator
+Action Input: 47^0.23
+Observation: 2.4242784855673896
+Thought: I now know the final answer
+Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.
+```
+
+## Agent steps breakdown
+The above steps represent the entire action trace for the Agent. However, let's break it down into individual agent steps so we can understand how it makes decisions, chooses actions and action inputs.
+
+### Step 1:
+
+We start with a prompt where we instruct LLM on what we want. The first prompt we send to LLM is the following:
+```
+Answer the following questions as best as you can. You have access to the following tools:
+
+Search: useful for when you need to answer questions about current events. You should ask targeted questions
+Calculator: useful for when you need to answer questions about math
+
+Use the following format:
+
+Question: the input question you must answer
+Thought: you should always think about what to do
+Action: the action to take, should be one of [Search, Calculator]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can repeat N times)
+Thought: I now know the final Answer
+Final Answer: the final Answer to the original input question
+
+Begin!
+Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
+Thought:
+```
+
+Notice how we finish the prompt with the `Thought:` token, priming the model to start its generation of an actual plan of what needs to be done in the first step.
+LLM would also generate `Action:` and `Action Input:` rows of this step which help us select an Action to execute and the input for that action.
+As we also instruct the model to stop generating a response with stop words being `Observation:` the model response for this step is:
+
+```
+I need to do some research to answer this question.
+Action: Search
+Action Input: Olivia Wilde's boyfriend
+```
+
+At this point, we invoke Search (along with the input) and receive the response from the Search tool: "First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016."
+
+We append the tool response under the `Observation:`
+
+LLM generation above and the response from the Search action (added under Observation) are appended to the initial prompt.
+
+### Step 2:
+
+We start this step with the following prompt:
+
+```
+Answer the following questions as best as you can. You have access to the following tools:
+
+Search: useful for when you need to answer questions about current events. You should ask targeted questions
+Calculator: useful for when you need to answer questions about math
+
+Use the following format:
+
+Question: the input question you must answer
+Thought: you should always think about what to do
+Action: the action to take, should be one of [Search, Calculator]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can repeat N times)
+Thought: I now know the final Answer
+Final Answer: the final Answer to the original input question
+
+Begin!
+Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
+Thought: I need to do some research to answer this question.
+Action: Search
+Action Input: Olivia Wilde's boyfriend
+Observation: First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016.
+Thought:
+```
+
+Again, notice how we've added the response from LLM and the `Observation` from the tool to the prompt, and we finish the prompt with `Thought:` token, priming the model to start the response with the plan for this step. As in the previous step, the model generates an action plan and selects an action and its input. The LLM response is:
+
+```
+I need to find out his age
+Action: Search
+Action Input: Jason Sudeikis age
+```
+
+This LLM response above gives us enough information to invoke a Search tool again along with the appropriate input, and we receive the response from the Search: 47 years. We add this response to the prompt history as the `Observation:` keyword.
+
+### Step 3:
+
+For the sake of brevity, let's not list the entire prompt again. The critical part to remember is that we append the output of step 2 to the prompt history we are creating as we step through each agent step. These so-called reasoning traces help agents "understand" what needs to be done in each successive step.
+The last part of the prompt is the following:
+```
+Thought: I need to find out his age
+Action: Search
+Action Input: Jason Sudeikis age
+Observation: 47 years
+Thought:
+```
+
+The LLM-generated response is:
+
+```
+I need to raise it to the 0.23 power
+Action: Calculator
+Action Input: 47^0.23
+```
+
+In this step, we invoke a new tool - the Calculator - with the specified input. The calculator response is 2.4242784855673896.
+We add the calculator response to the prompt history under the Observation keyword.
+
+
+### Step 4:
+
+Again, we append a calculator response and prompt to the prompt history once again. Let's not list the entire prompt, but the last few lines:
+```
+I need to raise it to the 0.23 power
+Action: Calculator
+Action Input: 47^0.23
+Observation: 2.4242784855673896
+Thought:
+```
+
+The LLM-generated response is:
+```
+I now know the final answer
+Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.
+```
+
+Using simple string parsing, we can detect that the model in this step responded with the "Final Answer:" keyword just as we instructed, thus breaking out of the loop and completing the agent's task with a response returned to the agent's client.
+In the rare case that "Final Answer:" is not generated even after many iterations, we can break out of the loop based on a maximum number of iterations allowed. Thereby, we can prevent an infinite loop.
+
+## Agent Creation
+
+The Agent can be either created programmatically or loaded from a YAML file.
+In the following example, one tool is a node for searching the web. The other tool is a pipeline for doing calculations in python.
+
+**Example programmatic creation:**
+```python
+search = SerpAPIComponent(api_key=os.environ.get("SERPAPI_API_KEY"), name="Serp", inputs=["Query"])
+
+prompt_model=PromptModel(model_name_or_path="text-davinci-003", api_key=os.environ.get("OPENAI_API_KEY"))
+
+calculator = Pipeline()
+calculator.add_node(PromptNode(
+ model_name_or_path=prompt_model,
+ default_prompt_template=PromptTemplate(prompt_text="Write a simple python function that calculates..."),
+    output_variable="python_runtime_input")) # input
+calculator.add_node(PythonRuntime()) # actual calculator
+
+prompt_node = PromptNode(
+ model_name_or_path=prompt_model,
+ stop_words=["Observation:"]
+)
+
+agent = Agent(prompt_node=prompt_node)
+# Nodes and pipelines can be added as tools to the agent. Just as nodes can be added to pipelines with add_node()
+agent.add_tool("Search", search, "useful for when you need to answer questions about current events. You should ask targeted questions")
+agent.add_tool("Calculator", calculator, "useful for when you need to answer questions about math")
+
+result = agent.run("What is 2 to the power of 3?")
+```
+
+**Example YAML file:**
+```yaml
+version: ignore
+
+components:
+ - name: AgentPromptNode
+ type: PromptNode
+ params:
+ model_name_or_path: DavinciModel
+ stop_words: ['Observation:']
+ - name: DavinciModel
+ type: PromptModel
+ params:
+ model_name_or_path: 'text-davinci-003'
+ api_key: 'XYZ'
+ - name: Serp
+ type: SerpAPIComponent
+ params:
+ api_key: 'XYZ'
+ - name: CalculatorInput
+ type: PromptNode
+ params:
+ model_name_or_path: DavinciModel
+ default_prompt_template: CalculatorTemplate
+ output_variable: python_runtime_input
+ - name: Calculator
+ type: PythonRuntime
+ - name: CalculatorTemplate
+ type: PromptTemplate
+ params:
+ name: calculator
+ prompt_text: |
+ # Write a simple python function that calculates
+ # $query
+ # Do not print the result; invoke the function and assign the result to final_result variable
+ # Start with import statement
+
+pipelines:
+ - name: calculator_pipeline
+ nodes:
+ - name: CalculatorInput
+ inputs: [Query]
+ - name: Calculator
+ inputs: [CalculatorInput]
+
+agents:
+ - name: agent
+ params:
+ prompt_node: AgentPromptNode
+ tools:
+ - name: Search
+ pipeline_or_node: Serp
+ description: >
+ useful for when you need to answer questions about current events.
+ You should ask targeted questions
+ - name: Calculator
+ pipeline_or_node: calculator_pipeline
+ description: >
+ useful for when you need to answer questions about math
+```
+
+and loading from the YAML file into an Agent:
+
+```python
+agent = Agent.load_from_yaml(
+ "test.mrkl.haystack-pipeline.yml", agent_name="agent"
+)
+```
+
+Pipelines, agents, nodes, and tools all implement `run` and `run_batch` methods, which is the minimal contract.
+At the moment, tools are either pipelines or nodes but we can imagine more types of tools as long as they implement that minimal contract.
+
+# Drawbacks
+
+Although the scope of the initial Agent is limited, it can grow into a full-fledged framework consisting of
+various types of agents (conversation, Robotic Process Automation etc.). The field of agents is rapidly growing, and we should be aware that it can even outgrow Haystack in the future. Perhaps we can start with the Agent being part of Haystack and potentially create a new project in the future.
+
+One of the central building blocks of an Agent is the PromptNode together with a set of "neural attachments" extending the agent's capabilities.
+Many tools like Search, Calculator, Notion and API connectors are somewhat different conceptually from the existing Haystack components.
+On the other hand, some of the existing Haystack components fit naturally into the framework of tools, for example, DocumentStore, Retriever, and Reader.
+
+There is a non-negligible potential for a growing implementation cost of such an agent framework that might
+stretch the resource away from the existing Haystack core. However, as LLM-based agents are an exciting and
+rapidly growing field, they may raise Haystack awareness significantly.
+
+# Alternatives
+
+We have considered an alternative design where the Agent is just another node or a pipeline.
+However, we decided to introduce it as a separate concept because of user-friendliness / clear code.
+While a Pipeline is a collection of Nodes, an Agent is a collection of Pipelines.
+Nodes in a pipeline have a pre-defined execution order, whereas the execution order of Pipelines in an Agent is chosen at runtime by an LLM.
+
+Regarding the name *Agent*, we considered several alternatives and prefer *Agent* for its simplicity.
+Alternative names:
+- MRKLAgent
+- LLMOrchestrator
+- LLMChain
+- Toolchain (fits nicely with tools and [toolchains](https://en.wikipedia.org/wiki/Toolchain) in software)
+- PipelineComposer / LLMComposer
+- PipelineComposition / LLMComposition
+- Interesting naming tidbits:
+ - MRKL [paper](https://arxiv.org/pdf/2205.00445.pdf) never uses word agent, only system
+ - ReAct [paper](https://arxiv.org/pdf/2210.03629.pdf) uses agent almost exclusively
+
+
+# Adoption strategy
+
+Introducing the Agent concept is a rather big change that would require a careful adoption strategy. We would need a lot more documentation explaining these new concepts, and each attaching tool would need additional documentation.
+
+However, the existing Haystack users, especially advanced users have already requested an agent framework to be added as part of the Haystack. We anticipate that advanced users will be the first to adopt the Agent.
+
+Using an Agent requires an OpenAI api key and some tools require additional api keys, for example SerpAPI but there are free trials.
+
+The debugging output of the Agent will help users to better understand how it works. In a debugger, the agent works as any other Haystack pipeline containing a prompt node.
+
+# How we teach this
+
+Yes, adding agents to Haystack would require a lot of documentation changes. Perhaps even a separate documentation for MRKL and other future agents somewhat detached from Haystack.
+
+We can teach existing Haystack users about agents and agent tools in a new section of the documentation. We can also organize Discord office hours, tutorials, and webinars to teach the new concepts.
+
+# Unresolved questions
+
+**Name of the parameter pipeline_or_node**
+* When we add a tool to the agent, we need to specify the name of the pipeline or node (component) to add.
+This parameter could be called `pipeline_or_node` or `pipeline_or_component_name` etc.
+
+**Umbrella Term for Pipeline and Agent**
+* We need a term that captures pipelines and agents for communication with users (NLP application, flow, system, service, engine ...).
+Let's have that conversation separately from this proposal.
+
+**Tools we imagine in the near future**
+* Tools will be discussed in a separate proposal.
diff --git a/testbed/deepset-ai__haystack/proposals/text/3959-json-converter.md b/testbed/deepset-ai__haystack/proposals/text/3959-json-converter.md
new file mode 100644
index 0000000000000000000000000000000000000000..1643929ad232c46780564b4afc49614803e3883e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/3959-json-converter.md
@@ -0,0 +1,142 @@
+- Title: Addition of a `JsonConverter` node
+- Decision driver: @bglearning
+- Start Date: 2023-01-26
+- Proposal PR: #3959
+
+# Summary
+
+Right now we don't have a node that can take json files as input to be fed into a pipeline.
+
+Proposal: Add a `JsonConverter` node that takes in a json file, parses it, and generates `Document`s.
+It would also support the `jsonl` format with one line corresponding to one document.
+
+# Basic example
+
+```python
+from haystack.nodes import JsonConverter
+
+converter = JsonConverter()
+
+# Receive back List[Document]
+docs = converter.convert("data_file.json")
+```
+
+With the `data_file.json` as a list of json representation of documents:
+
+```json
+[
+ {
+ "content": "...",
+ "content_type": "text", "meta": {...}
+ },
+ {
+ "content": [["h1", "h2"], ["val1", "val2"]],
+ "content_type": "table", "meta": {...}
+ }
+]
+```
+
+Alternatively, the data can also be `jsonl`.
+By default, the converter will try to auto-detect between `json` and `jsonl`.
+
+The main use case would be to be able to include this directly in the YAML specification
+
+```yaml
+...
+
+pipelines:
+ - name: indexing
+ nodes:
+ - name: JsonConverter
+ inputs: [File]
+ - name: Retriever
+ inputs: [JsonConverter]
+ - name: DocumentStore
+ inputs: [Retriever]
+```
+
+# Motivation
+
+Users may want to do some processing of the data themselves, persist it somehow, and only then pass it onto a haystack pipeline (for instance, by uploading into the REST API endpoint). Ideally this would happen without the need to create a custom endpoint.
+
+For many such processing tasks, json is a convenient intermediate format as it allows for things like specifying the metadata.
+
+Specifically, one use-case that has come up for a team using haystack: they want to use a PDF parser (for tables) currently not in haystack. As such, they want to handle the parsing themselves outside of haystack, put the parsed result into a json file, and then pass it onto a haystack API endpoint.
+
+Having a `JsonConverter` node would allow users to set up a haystack pipeline to ingest such data without the user having to create a custom node for it.
+
+# Detailed design
+
+The converter would primarily be a wrapper around `Document.from_dict`.
+
+The schema accepted would be a list of json dictionaries of Documents.
+So, the following, with `content` being the only compulsory field.
+
+```
+[
+ {
+ "content": str or list[list],
+ "content_type": str,
+ "meta": dict,
+ "id_hash_keys": list,
+ "score": float,
+ "embedding": array
+ },
+ ...
+]
+```
+
+```python
+class JsonConverter(BaseConverter):
+ def __init__(self, ...):
+ ...
+
+ def convert(
+ self,
+ file_path: Path,
+ meta: Optional[Dict[str, str]] = None,
+ encoding: Optional[str] = "UTF-8",
+ id_hash_keys: Optional[List[str]] = None,
+ ...
+ ) -> List[Document]:
+ if id_hash_keys is None:
+ id_hash_keys = self.id_hash_keys
+
+ documents = []
+ with open(file_path, encoding=encoding, errors="ignore") as f:
+ data = json.load(f)
+ for doc_dict in data:
+ doc_dict = dict(doc_dict)
+ doc_dict['id_hash_keys'] = id_hash_keys
+ doc_dict['meta'] = doc_dict.get('meta', dict())
+
+ if meta:
+ doc_dict['meta'].update(meta)
+
+ documents.append(Document.from_dict(doc_dict))
+
+ return documents
+```
+
+# Drawbacks
+
+- It would add another node that needs to be maintained and documented.
+
+# Alternatives
+
+- This node could be created as a custom node for the particular application where it is required. But could be better to have it out-of-the-box.
+- Design Alternative: Also, provide options to map custom fields to `Document` fields (E.g. {"review": "content"}) which could make this node a bit more flexible and might mean the user doesn't have to do some pre-formatting beforehand. But this can be a future development.
+
+# Adoption strategy
+
+It doesn't introduce a breaking change and wouldn't require changes in existing pipelines.
+
+# How we teach this
+
+It would be good to have this be part of the Guide (perhaps under File Converters).
+
+Could also be mentioned in one of the tutorials. For instance, in the preprocessing tutorial where we say "Haystack expects data to be provided as a list of documents in the following dictionary format".
+
+# Unresolved questions
+
+- Should the `content_type` allowed be restricted (e.g. only "text" and "table"). And relatedly, should the name be more specific? E.g. `JsonTableTextConverter` rather than `JsonConverter`. Currently leaning towards no restriction and the `JsonConverter` name.
diff --git a/testbed/deepset-ai__haystack/proposals/text/4084-agent-demo.md b/testbed/deepset-ai__haystack/proposals/text/4084-agent-demo.md
new file mode 100644
index 0000000000000000000000000000000000000000..a6d8f8be592f8ad488bad055f9c5ffb03e7fb825
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/4084-agent-demo.md
@@ -0,0 +1,262 @@
+- Title: Agent tools
+- Decision driver: Vladimir Blagojevic (with Julian Risch)
+- Start Date: 2023-02-06
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/4084
+
+
+# Summary
+
+Agent system consists of an extendable set of modules (aka experts/tools) and a "router" dispatching natural
+language input to a module that can best respond to the given input. These modules can be:
+
+• Neural, including the general-purpose LLMs as well as other smaller, specialized LMs.
+
+• Symbolic, for example a math calculator, a currency converter, or an API call to a search engine.
+
+
+Current Haystack pipeline design approach fits nicely with these extension modules/tools. They can be tested in
+isolation and subsequently integrated into the agent. The agent can register these tools, understand their objectives
+and route the user input to the appropriate tool. The tools can be either Haystack pipelines or components.
+
+
+A demo idea for the new Agent is to demonstrate Question Answering on Technical Documentation (using the example of
+Haystack). It should be useful and impressive, and it determines what tools we will implement first.
+
+**Example Questions:**
+- "Why am I seeing duplicate answers being returned?" based on indexed FAQ documentation
+- "Which organizations use Haystack?" based on web search and Wikipedia returning answers via SearchEngine
+- "How can I choose the model for PromptNode?" based on retrieving documents via WebRetriever
+- "How can I make overwrite_with_env_variables work in RayPipeline" based on an open issue found with GitHub API or web search
+
+If an answer cannot be found in indexed files, the Agent will use self reflection to rephrase the question and/or search the
+web. It will give updates while searching, for example print thoughts: “Found nothing in indexed documentation. Will continue
+with web search.” If still nothing can be found, the Agent will generate a link to a pre-filled and pre-tagged issue template
+like this that the user can choose to create. Tools required for the demo: SearchEngine, GitHubAPI, Self Reflection Module, WebRetriever.
+
+We also need to demonstrate how the Agent uses a combination of multiple tools to answer a question instead of just trying
+them sequentially.
+
+**Example Question:**
+- "Is there an open issue about any of the nodes in a standard QA pipeline not working?"
+
+Here the LLM might first use documentation to find out more about what nodes are part of standard QA pipelines and then
+searches on GitHub for open issues that mention retriever or reader.
+
+We propose the following tools to be developed for the MVP version of our agent (#3925):
+
+- SearchEngine
+- Self reflection module (query rephrasing)
+- WebRetriever
+- Top-p (nucleus) sampling
+- Agent memory
+- deepset Cloud API
+
+# Basic example
+
+No code examples are provided, but a high-level overview of how the feature would be used.
+
+There is also a [colab notebook](https://colab.research.google.com/drive/1oJf4gxkokIazLN26NNt6XsewMvCmVrz8?usp=sharing)
+demonstrating how to use an agent with tools and a [branch](https://github.com/deepset-ai/haystack/compare/main...mrkl-pipeline)
+for demo purposes (no pull request).
+
+# Motivation
+
+Agent tools are the main and essential building block of the agent system. They are the modules extending the
+agent's capabilities. Agent, when it relies only on itself, is not as powerful as when it can leverage the
+external modules (tools) that are highly specialized in their respective domains. Agent can't do, for example, lookup of
+restaurants in our neighbourhood, but it can use SearchEngine to do that. When it comes to complex math calculations, LLM
+would be a poor choice, but a math calculator would be a good fit, and so on.
+
+Combining the power of LLMs with the power of external tools is the key ingredient to the success of the agent framework.
+
+
+# Detailed design
+
+In the following sections, we list essential agent tools required for agent MVP. We start with the general design
+principles and then describe each tool in detail.
+
+Each tool is defined as a Python class that inherits from the BaseComponent class. The base class acts as a wrapper
+around the actual tool implementation.
+
+The main Agent modules/tools are:
+
+## SearchEngine
+
+SearchEngine is a symbolic API module allowing programmatic interaction with Google and other search engines. We'll have
+multiple providers of SearchEngine including https://serper.dev, https://www.searchapi.io/ and https://serpapi.com as initial providers.
+
+SearchEngine will return a list of results (e.g. List[Document]), the content of each document being a "snippet" of the
+single search result, while all other attributes of the search results (e.g. title, url link, etc.) will
+be metadata of the document.
+
+### SearchEngine scoping
+
+A great feature of SearchEngine is that it can be scoped to a particular domain. Therefore, in our demo, if so desired,
+we can search through Haystack documentation on docs.haystack.com, github.com/deepset-ai/haystack and so on.
+
+## Self reflection module
+
+SRM is a neural module that aims to improve the agent's overall robustness. Agents can sometimes be very fragile
+in their execution steps. As a core component, the motivation for the self-reflection module (SRM) is to improve the
+robustness of the agent's execution. Agents can be fragile in their execution steps due to the non-deterministic nature
+of LLM inferencing and their sensitivity to the chosen prompts.
+
+SRM relies on an instruction-following LLM and operates in two lock-step sequences. Given any other agent module/tool,
+its description, and its input and output - SRM does the following:
+
+1. It checks if the module description (objective), the input, and the output are aligned/congruent.
+2. If they are not aligned, it rephrases the input (while retaining semantic meaning) and attempts to elicit an aligned
+output.
+
+SRM can improve the robustness of any module. For example, SearchEngine can sometimes be very sensitive to query wording.
+If we are searching for "Olivia Wilde's boyfriend", the results might not be as precise as if we search for "Who is
+Olivia Wilde's current boyfriend?". SRM can rephrase the query to the latter form if the former form doesn't return
+an aligned answer.
+
+It is still an open question of how to implement SRM. We can make it an internal component or another pipeline. The
+former approach is more efficient and has tight coupling, and the latter is more flexible. We'll carefully weigh
+the pros and cons of each approach and decide on the best approach.
+
+One idea how to integrate SRM into an existing Agent and its tools is that every tool can set a parameter indicating
+how often SRM should run for this tool (default value 0). If it is 0, SRM will not be used at all for that tool,
+otherwise it will check at least once for alignment/congruency and optionally rephrase. This will repeat but not
+more often than the parameter allows.
+
+One possible implementation of SRM could be https://arxiv.org/abs/2203.11171
+
+The main motivation for addition of SRM (and self-consistency ideas in general) is the finding from
+https://arxiv.org/abs/2210.03629 Google Brain researchers discovered that "Wrong reasoning trace (including failing
+to recover from repetitive steps)" accounts for 47% of agent errors.
+
+## WebRetriever
+
+WebRetriever is a symbolic module that allows users to query the web for relevant documents. It is a wrapper around
+SearchEngine that produces a list of Haystack Documents.
+
+WebRetriever will operate in two modes:
+
+- snippet mode: WebRetriever will return a list of Documents, each Document being a snippet of the search result
+- document mode: WebRetriever will return a list of Documents, each Document being a full HTML stripped document of the search result
+
+In document mode, given a user query passed via the run method, SearchEngine first fetches the top_k relevant URL hits, which are
+downloaded and processed. The processing involves stripping irrelevant HTML tags and producing clean raw text. WebRetriever
+then splits raw text into paragraph-long Documents of the desired size.
+
+In the future, we'll develop WebRetriever variants with DocumentStore that caches documents with some expiration
+setting. The enhanced WebRetriever versions will allow us to avoid downloading the same documents from the web
+multiple times.
+
+However, for the first version of the agent, we'll strive to keep WebRetriever as simple as possible.
+
+## Top-p (nucleus) sampling
+
+Although very useful, top-k ranking is sometimes inferior to top-p ranking. Instead of filtering only from the most
+likely k hits, in top-p sampling we choose the smallest possible set of documents whose cumulative probability of
+relevance exceeds the probability p (usually close to 1). The relevance could be calculated via sbert.net CrossEncoder using
+query and the document content.
+
+In web search, this is a very useful feature as it allows us to avoid query-irrelevant documents and be super precise
+in our search results. We'll implement top-p sampling as a separate module that WebRetriever can use. Other components
+in Haystack can use it as well.
+
+The main motivation for addition of top-p sampling is the finding from https://arxiv.org/abs/2210.03629 Google Brain
+researchers found that "Search result error" is the main cause in 23% of the cases of agent failure. Top-p sampling
+can help us minimize this point of failure.
+
+Note that one can still use top-k filtering via Ranker and top-p filtering via TopPSampler in combination.
+
+## Agent memory
+
+Although we currently support only so-called ReAct agents, it is not hard to envision a future where we'll have
+additional agent types including conversational agents.
+
+Due to LLMs' one-shot-forget nature of inferencing, conversational agents might need to remember the context of the
+conversation. To support conversational agents, we'll need an Agent memory component. The memory component will initially contain
+two submodules: entity extraction and summarization.
+
+Entity extraction is a neural module extracting entities from the provided conversation transcript (raw text).
+The entities are best thought of as an outcome of Named-entity recognition task; for example, people, places, organizations etc.
+
+Entity summarization is a neural module that summarizes the entities extracted by the entity extraction module.
+
+Entity extraction and summarization are run in the background as the conversation progresses. The
+frequency of extraction and summarization updates will be configurable.
+
+The extracted entities along with relevant summaries will be stored in the Agent memory. Agent memory implementation
+details are out of scope of this proposal; they could be various short or long term memory storage options.
+
+For the first version of the agent, we'll strive to keep the memory component as simple as possible; we'll
+only implement entity extraction and summarization while we'll use runtime memory for storing entities.
+
+### Future improvements:
+
+As we have limited token payload for model inferencing, we'll need to implement a mechanism for decaying memory.
+
+If we have many entities in the memory, we'll also need to implement a mechanism for entities selection.
+We'll likely need no summaries for well-known entities like "Elon Musk" or "New York".
+
+Information related to the entities could become stale over time (e.g."I'm currently in New York") and we'll
+need to implement a mechanism for updating the entities.
+
+The mechanism for decaying memory, prioritizing and updating entities is out of scope of this proposal.
+
+
+## deepset Cloud API
+The Agent should be able to use pipelines deployed on deepset Cloud as a tool.
+To this end, the text question needs to be sent via REST API to
+the [search endpoint](https://docs.cloud.deepset.ai/reference/search_api_v1_workspaces__workspace_name__pipelines__pipeline_name__search_post) of a given pipeline deployed on deepset Cloud.
+
+In the demo, the Agent will use this tool for question answering on indexed documents of the Haystack documentation,
+such as documentation web pages or tutorials.
+
+
+# Drawbacks
+
+One of the main reasons why we should not work on implementing this proposal is that it is a rather large
+undertaking requiring substantial resources. This naturally carries significant risks. We should carefully weigh
+the pros and cons of tools we want to implement and prioritize them. The current priority criteria is that the tool
+should be useful for the agents MVP, the majority of early adopters and that it should be relatively easy to
+implement. We can always add more tools in the future.
+
+Another priority guidance is the demo we intend to build. We want to build a demo that showcases the agent's
+capabilities in a use case that is relevant to the majority of early adopters (current Haystack users).
+
+
+# Alternatives
+
+We also considered the following demo alternatives:
+
+- Medical QA: We can build a demo that answers medical questions. This is a very interesting use case but also
+fraught with risks. A question one might ask in such a demo is “Which antibiotic should I use for urinary tract infections?”.
+A factoid-based QA system might (reasonably) return the answer “trimethoprim 200mg”. However, a “correct” answer is not
+sufficient to translate into clinical use. There were other recent demos but they were not very successful.
+See https://twitter.com/GlassHealthHQ/status/1620092094034620421 for more details.
+
+- Public Healthcare QA: a bit less risky proposal than the medical QA. We can build a demo that answers questions about
+healthy diet, cooking recipes, vitamins etc. This demo would use almost exactly the same tools as the main demo proposal
+and we can potentially switch to this demo if needed.
+
+- Financial Domain (earnings transcript): we can build a demo that answers questions about earnings transcripts. However,
+we were not sure if this is a good use case for the agent as it is not very relevant to the majority of early adopters.
+
+
+# Adoption strategy
+
+An adoption strategy for this proposal is not needed as much, as the objective is to demo the capabilities of the agent
+and inspire early adopters to use the agent and the main tools we are going to implement.
+
+
+# Demo
+
+See the Summary section for the demo description.
+
+# How we teach this
+
+We intend to use the demo to teach users about the agent's capabilities. We'll subsequently add more documentation about
+core components used in the demo and the agent in general. This demo would be mainly used to promote Haystack Agents and
+to generate interest in the agent.
+
+# Unresolved questions
+
+Optional, but suggested for first drafts. What parts of the design are still
+TBD?
diff --git a/testbed/deepset-ai__haystack/proposals/text/4172-shaper-in-prompt-template.md b/testbed/deepset-ai__haystack/proposals/text/4172-shaper-in-prompt-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..3dd8180d1e8b956cab26edaaffcd4870bdf34d90
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/4172-shaper-in-prompt-template.md
@@ -0,0 +1,185 @@
+- Title: Shapers in Prompt Templates
+- Decision driver: tstadel
+- Start Date: 2023-02-15
+- Proposal PR: (fill in after opening the PR)
+- Github Issues or Discussion:
+ - spike: https://github.com/deepset-ai/haystack/pull/4061
+ - solved issues:
+ - https://github.com/deepset-ai/haystack/issues/3877
+ - https://github.com/deepset-ai/haystack/issues/4053
+ - https://github.com/deepset-ai/haystack/issues/4047
+
+# Summary
+
+In order to make prompt templates more flexible and powerful while at the same time making PromptNode as easy to use as any other node in Haystack, we want to introduce two modifications to PromptTemplate:
+- output: support Shapers in PromptTemplates to enable the user to define how the output to the prompt template should be shaped
+- input: extend the prompt syntax to support the usage of functions that can be applied to input variables
+
+With these modifications prompt templates will be able to define, and abstract away from PromptNode, everything that is necessary to create a Haystack node that is specialized for a certain use-case (e.g. generative QA). Additionally, PromptTemplates will be fully serializable, enabling everyone to share their prompt templates with the community.
+
+# Basic example
+
+A generative QA pipeline would be as easy as this:
+
+ ```python
+ from haystack import Pipeline
+ from haystack.document_store import InMemoryDocumentStore
+ from haystack.nodes import PromptNode, EmbeddingRetriever
+
+ document_store = InMemoryDocumentStore()
+ retriever = EmbeddingRetriever(document_store=document_store, ...)
+ pn = PromptNode(default_prompt_template="question-answering-with-references")
+
+ p = Pipeline()
+ p.add_node(component=retriever, name="Retriever", inputs=["Query"])
+ p.add_node(component=pn, name="Prompt", inputs=["Retriever"])
+ ```
+
+As a result we get a pipeline that uses PromptNode as a drop-in replacement for Generators:
+
+ ```python
+ p.run(
+ query="What is the most popular drink?"
+ )
+ ```
+
+ ```python
+ {'answers': [],
+ 'invocation_context': {'query': 'What is the most popular drink?',
+ 'documents': [],
+ 'answers': []},
+ '_debug': {'PromptNode': {'runtime': {'prompts_used': ['Create a concise and informative answer (no more than 50 words) for a given question based solely on the given documents. You must only use information from the given documents. Use an unbiased and journalistic tone. Do not repeat text. Cite the documents using Document[number] notation. If multiple documents contain the answer, cite those documents like ‘as stated in Document[number,number,etc]’. If the documents do not contain the answer to the question, say that ‘answering is not possible given the available information.’\nDocument[1]: Beer is the oldest(1)(2)(3) and most widely consumed(4) type of alcoholic drink in the world, and the third most popular drink overall after potable water and tea.(5) It is produced by the brewing and fermentation of starches, mainly derived from cereal grains—most commonly from malted barley, though wheat, maize (corn), rice, and oats are also used. During the brewing process, fermentation of the starch sugars in the wort produces ethanol and carbonation in the resulting beer.(6) Most modern beer is brewed with hops, which add bitterness and other flavours and act as a natural preservative and stabilizing agent. Other flavouring agents such as gruit, herbs, or fruits may be included or used instead of hops. In commercial brewing, the natural carbonation effect is often removed during processing and replaced with forced carbonation.(7); \n Question: What is the most popular drink?; Answer: ']}}},
+ 'root_node': 'Query',
+ 'params': {},
+ 'query': 'What is the most popular drink?',
+ 'documents': [],
+ 'node_id': 'PromptNode'}
+ ```
+
+The corresponding prompt template would look like this (provided `join_documents` and `strings_to_answers` Shaper functions are extended a bit):
+
+ ```python
+ PromptTemplate(
+ name="question-answering-with-references",
+ prompt_text="Create a concise and informative answer (no more than 50 words) for a given question "
+ "based solely on the given documents. You must only use information from the given documents. "
+ "Use an unbiased and journalistic tone. Do not repeat text. Cite the documents using Document[number] notation. "
+ "If multiple documents contain the answer, cite those documents like ‘as stated in Document[number,number,etc]’. "
+ "If the documents do not contain the answer to the question, say that ‘answering is not possible given the available information.’\n"
+ "{join(documents, '\n', '\nDocument[$idx]: $content', {'\n': ' ', '[': '(', ']': ')'})} \n Question: {query}; Answer: ",
+ output_shapers=[
+ Shaper(
+ func="strings_to_answers",
+ inputs={"strings": "results", "documents": "documents"},
+ outputs=["answers"],
+ )
+ ],
+ output_variable="answers",
+ )
+ ```
+
+We make sure that we have proper default values for the input shaping function and it is easy to understand. `{join(documents)}` should be usable in most cases. When you want to have more control over document rendering, something like `join(documents, DELIMITER, PATTERN, CHAR_REPLACEMENT)` with
+
+ ```python
+ DELIMITER = "\n"
+ PATTERN = "$content" # parsable by StringTemplate using data from document.content, document.meta and the index of the document
+ CHAR_REPLACEMENT = {"[": "(", "]": ")"} # just an example of what could be passed here
+ ```
+
+would do.
+
+Note that the number of prompts created depends on which shaping functions are used. If you use `join(documents)` you will have only one prompt. If you omit the `join` and use `to_list(query)` instead, you will have multiple prompts (one prompt per document).
+
+# Motivation
+
+Currently using PromptNode is a bit cumbersome as:
+- for using it in popular use-cases like question-answering, it requires adding the Shapers to the pipeline manually, which creates a lot of boilerplate code and is not very intuitive
+- to customize a prompt within a pipeline, you may need to change four different things: the prompt node, the prompt template, the input shapers and the output shapers. This is not ideal as it requires writing a lot of boilerplate code and makes it hard to iterate quickly on prompts.
+- if you wanted to share your prompt template with the community, you would need to share the whole pipeline (as you do need shapers), which is not ideal as it may contain other nodes that are not relevant.
+
+
+# Detailed design
+
+## General changes
+PromptTemplate gets one new attribute: `output_shapers`. This is a list of Shaper objects that are applied to the output of the prompt.
+PromptTemplate's syntax is extended to allow for the usage of shaping functions on input variables. These shaping functions are predefined.
+
+## Basic flow:
+PromptNode calls `PromptTemplate.prepare` before executing the prompt. `PromptTemplate.prepare` applies the shaping functions (if present) to the arguments of the `invocation_context`.
+PromptNode invokes the prompt on the prepared `invocation_context`.
+PromptNode calls `PromptTemplate.post_process` after executing the prompt. `PromptTemplate.post_process` makes all `output_shapers` run on the `invocation_context`.
+
+## Shaping functions
+The PromptTemplate syntax is extended to allow for the usage of shaping functions on input variables. These shaping functions should be easy to understand and use.
+We only support positional args for shaping functions. This is because we want to keep the syntax simple and we don't want to overcomplicate the parsing logic. As args any python primitive is allowed (e.g. strings, ints, floats, lists, dicts, None).
+Parsing is done by using regular expressions. If we however notice that this is not enough, we can switch to a more complex parsing library like `jinja2`.
+Here is a basic (and incomplete) example of how the parsing logic could look:
+
+ ```python
+
+ # template allowing basic list comprehensions to create the wanted string
+ template = """
+ Create a concise and informative answer (no more than 50 words) for a given question
+ based solely on the given documents. You must only use information from the given documents.
+ Use an unbiased and journalistic tone. Do not repeat text. Cite the documents using Document[number] notation.
+ If multiple documents contain the answer, cite those documents like ‘as stated in Document[number,number,etc]’.
+ If the documents do not contain the answer to the question, say that ‘answering is not possible given the available information.
+ {join(documents, new_line)} \n Question: {query}; Answer:
+ """
+
+ for group in re.findall(r'\{(.*?)\}', template):
+ if "(" in group and ")" in group:
+ function_to_call = group[:group.index("(")].strip()
+ variables_for_function = [var.strip() for var in group[group.index("(")+1:group.index(")")].split(",")]
+ print(f"Found function '{function_to_call}' with vars '{variables_for_function}'")
+ else:
+ print("Found single variable:", group)
+
+ # Returns
+ # >>> Found function 'join' with vars '['documents', 'new_line']'
+ # >>> Found single variable: query
+ ```
+
+## Prompt engineering with Haystack Pipelines
+Additionally we want to support changing the prompt via a param of `Pipeline.run`. This is useful for example if you want to fine-tune your prompt and iterate quickly on it without having to change the pipeline. The `prompt` param is a string in `Pipeline.run` which will be delegated to the `PromptNode` and then used by `PromptTemplate`. This is similar to how `Pipeline.run` works with the `query` param. Note that the `prompt` param does not affect `output_shapers`.
+
+## Misc
+Note, that `Shapers` are still usable in Pipelines as before.
+
+# Drawbacks
+
+Look at the feature from the other side: what are the reasons why we should _not_ work on it? Consider the following:
+
+- What's the implementation cost, both in terms of code size and complexity? A good day
+- Can the solution you're proposing be implemented as a separate package, outside of Haystack? No
+- Does it teach people more about Haystack? No, but it makes it easier to use especially for beginners.
+- How does this feature integrate with other existing and planned features? It doesn't change any existing features and should nicely integrate with agents.
+- What's the cost of migrating existing Haystack pipelines (is it a breaking change?)? None
+
+It also fosters a bit the nesting of components in Haystack. Although the whole PromptNode ecosystem already does this (e.g. via PromptModel, PromptTemplate being used by PromptNode), it's still a bit of a new concept. However, I think it's a good one and it's not too hard to understand.
+
+We still don't have access to PromptNode, PromptModel or the invocation layer inside of PromptTemplates. If we want PromptTemplate to access fundamental parts of them (e.g. the tokenizer), we would need to pass them to the PromptTemplate. This would make the whole system more complex, but it would be possible.
+
+# Alternatives
+
+Sub-classing specialized PromptNodes like QuestionAnsweringPromptNode, which would have the shapers already defined. The same is true for sub-classing PromptTemplate like QuestionAnsweringPromptTemplate. Both sub-classing approaches would make it easier to get started, but they would make it harder to iterate quickly on prompts, be less flexible, and make sharing difficult.
+
+Having `input_shapers` in the same way as `output_shapers` in the PromptTemplate. This would make it harder for users to get started as they would need to understand Shapers and which functions are relevant for input shaping.
+
+# Adoption strategy
+
+As the syntax for input variables in `PromptTemplate` changes we can do the following:
+- raise an error if the old syntax is used and tell the user to use the new syntax
+- support the old syntax for a while and raise a deprecation warning
+
+# How we teach this
+
+We should show how:
+- predefined PromptTemplates can be used
+- predefined PromptTemplates can be customized
+- custom PromptTemplates can be created
+
+# Unresolved questions
+
+How does `OpenAIAnswerGenerator` make use of input shaping functions and output shapers?
+- output shapers: it doesn't use them
+- input shaping functions: it uses them if they are present. If not it uses its own default functions.
diff --git a/testbed/deepset-ai__haystack/proposals/text/4284-drop-basecomponent.md b/testbed/deepset-ai__haystack/proposals/text/4284-drop-basecomponent.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ed961b6cbf8c869ec087e9bbe56ae2cd46460c7
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/4284-drop-basecomponent.md
@@ -0,0 +1,950 @@
+- Title: Drop `BaseComponent` and reimplement `Pipeline`.
+- Decision driver: @ZanSara
+- Start Date: 27/02/2023
+- Proposal PR: #4284
+- Github Issue or Discussion: #2807
+
+# Summary
+
+Haystack Pipelines are very powerful objects, but they still have a number of unnecessary limitations, by design and by implementation.
+
+This proposal aims to address most of the implementation issues, some fundamental assumptions like the need for DAGs and the `BaseComponent` class, and proposes a solution for the question of `DocumentStore`'s status with respect to the `Pipeline`.
+
+
+# Motivation
+
+Pipelines are the fundamental component of Haystack and one of its most powerful concepts. At its core, a Pipeline is a DAG (Directed Acyclic Graph) of objects called Nodes, or Components, each of which executes a specific transformation on the data flowing along the pipeline. In this way, users can combine powerful libraries, NLP models, and simple Python snippets to connect a herd of tools into one single, coherent object that can fulfill an infinite variety of tasks.
+
+However, as it currently stands, the `Pipeline` object is also imposing a number of limitations on its use, most of which are likely to be unnecessary. Some of these include:
+
+- DAGs. DAGs are safe, but loops could enable many more usecases, like `Agents`.
+
+- `Pipeline` can select among branches, but cannot run such branches in parallel, except for some specific and inconsistent corner cases. For further reference and discussions on the topic, see:
+ - https://github.com/deepset-ai/haystack/pull/2593
+ - https://github.com/deepset-ai/haystack/pull/2981#issuecomment-1207850632
+ - https://github.com/deepset-ai/haystack/issues/2999#issuecomment-1210382151
+
+- `Pipeline`s are forced to have one single input and one single output node, and the input node has to be called either `Query` or `Indexing`, which softly forbids any other type of pipeline.
+
+- The fixed set of allowed inputs (`query`, `file_paths`, `labels`, `documents`, `meta`, `params` and `debug`) blocks several usecases, like summarization pipelines, translation pipelines, even some sort of generative pipelines.
+
+- `Pipeline`s are often required to have a `DocumentStore` _somewhere_ (see below), even in situations where it wouldn't be needed.
+ - For example, `Pipeline` has a `get_document_store()` method which iterates over all nodes looking for a `Retriever`.
+
+- The redundant concept of `run()` and `run_batch()`: nodes should take care of this distinction internally if it's important, otherwise run in batches by default.
+
+- The distinction between a `Pipeline` and its YAML representation is confusing: YAMLs can contain several pipelines, but `Pipeline.save_to_yaml()` can only save a single pipeline.
+
+In addition, there are a number of known bugs that makes the current Pipeline implementation hard to work with. Some of these include:
+
+- Branching and merging logic is known to be buggy even where it's supported.
+- Nodes can't be added twice to the same pipeline in different locations, limiting their reusability.
+- Pipeline YAML validation needs to happen with a YAML schema because `Pipeline`s can only be loaded along with all their nodes, which is a very heavy operation. Shallow or lazy loading of nodes doesn't exist.
+- Being forced to use a schema for YAML validation makes it impossible to validate the graph in advance.
+
+On top of these issues, there is the tangential issue of `DocumentStore`s and their uncertain relationship with `Pipeline`s. This problem has to be taken into account during a redesign of `Pipeline` and, if necessary, `DocumentStore`s should also be partially impacted. Some of these issues include:
+
+- `DocumentStore`s are nodes in theory, but in practice they can be added to `Pipeline`s only to receive documents to be stored. On the other hand, `DocumentStore`'s most prominent usecase is as a _source_ of documents, and currently they are not suited for this task without going through an intermediary, most often a `Retriever` class.
+ - The relationship between `DocumentStore` and `Retriever` should be left as a topic for a separate proposal but kept in mind, because `Retriever`s currently act as the main interface for `DocumentStore`s into `Pipeline`s.
+
+This proposal tries to address all the above points by taking a radical stance with:
+
+- A full reimplementation of the `Pipeline` class that does not limit itself to DAGs, can run branches in parallel, can skip branches and can process loops safely.
+
+- Dropping the concept of `BaseComponent` and introducing the much lighter concept of `Node` in its place.
+
+- Define a clear contract between `Pipeline` and the `Node`s.
+
+- Define a clear place for `DocumentStore`s with respect to `Pipeline`s that doesn't forcefully involve `Retriever`s.
+
+- Redesign the YAML representation of `Pipeline`s.
+
+# Basic example
+
+A simple example of what the new Pipeline could look like is shown here. This example does not address `DocumentStore`s or YAML serialization, but rather focuses on the shift between `BaseComponent` and `Node`s.
+
+For the detailed explanation behind the design choices and all open questions, see the "Detailed Design" section and the draft implementation here: https://github.com/ZanSara/haystack-2.0-draft
+
+## Simple example
+
+This is a very simplified example that performs some mathematical operations. See below for more realistic examples.
+
+```python
+from typing import Dict, Any, List, Tuple
+from haystack.pipeline import Pipeline
+from haystack.nodes import node
+
+# A Haystack Node. See below for details about this contract.
+# Crucial components are the @node decorator and the `run()` method
+@node
+class AddValue:
+ def __init__(self, add: int = 1, input_name: str = "value", output_name: str = "value"):
+ self.add = add
+ self.init_parameters = {"add": add}
+ self.inputs = [input_name]
+ self.outputs = [output_name]
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Any],
+ stores: Dict[str, Any],
+ ):
+ my_parameters = parameters.get(name, {})
+ add = my_parameters.get("add", self.add)
+
+ for _, value in data:
+ value += add
+
+ return ({self.outputs[0]: value}, parameters)
+
+
+@node
+class Double:
+ def __init__(self, input_edge: str = "value"):
+ self.init_parameters = {"input_edge": input_edge}
+ self.inputs = [input_edge]
+ self.outputs = [input_edge]
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Any],
+ stores: Dict[str, Any],
+ ):
+ for _, value in data:
+ value *= 2
+
+ return ({self.outputs[0]: value}, parameters)
+
+
+pipeline = Pipeline()
+
+# Nodes can be initialized as standalone objects.
+# These instances can be added to the Pipeline in several places.
+addition = AddValue(add=1)
+
+# Nodes are added with a name and a node. Note the lack of references to any other node.
+pipeline.add_node("first_addition", addition, parameters={"add": 3}) # Nodes can store default parameters per node.
+pipeline.add_node("second_addition", addition) # Note that instances can be reused
+pipeline.add_node("double", Double())
+
+# Nodes are then connected as input node: [list of output nodes]
+pipeline.connect(connect_from="first_addition", connect_to="double")
+pipeline.connect(connect_from="double", connect_to="second_addition")
+
+pipeline.draw("pipeline.png")
+
+# Pipeline.run() accepts 'data' and 'parameters' only. Such dictionaries can contain
+# anything, depending on what the first node(s) of the pipeline requires.
+# Pipeline does not validate the input: the first node(s) should do so.
+results = pipeline.run(
+ data={"value": 1},
+ parameters = {"second_addition": {"add": 10}} # Parameters can be passed at this stage as well
+)
+assert results == {"value": 18}
+```
+
+The result of `Pipeline.draw()`:
+
+
+
+
+## Query Pipeline
+
+This query pipeline includes real Reader and Retriever nodes. A new set of primitives and a new DocumentStore has been used for this example: please refer to the draft implementation for details on those: https://github.com/ZanSara/haystack-2.0-draft
+
+
+
+RetrieveByBM25
+
+```python
+from typing import Dict, Any, List, Tuple
+
+import logging
+
+from haystack.data import TextQuery
+from haystack.nodes import node
+
+
+@node
+class RetrieveByBM25:
+ """
+ Simple dummy BM25 Retriever that works with MemoryStore.
+ Supports batch processing.
+ """
+ def __init__(self,
+ input_name: str = "query",
+ output_name: str = "documents_by_query",
+ default_store: str = "documents",
+ default_top_k: int = 10
+ ):
+ self.default_store = default_store
+ self.default_top_k = default_top_k
+
+ # Pipeline's contract:
+ self.init_parameters = {
+ "input_name": input_name,
+ "output_name": output_name,
+ "default_store": default_store,
+ "default_top_k": default_top_k
+ }
+ self.inputs = [input_name]
+ self.outputs = [output_name]
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Any],
+ stores: Dict[str, Any],
+ ):
+ my_parameters = parameters.get(name, {})
+ store_name = my_parameters.get("store", self.default_store)
+ top_k = my_parameters.get("top_k", self.default_top_k)
+
+ # This can be done safely, because Nodes expect the Pipeline to respect their contract.
+ # Errors here are Pipeline's responsibility, so Nodes should not care.
+ queries = data[0][1]
+
+ # Batch support is not the pipeline's business, but the node's
+ if isinstance(queries, TextQuery):
+ queries = [queries]
+ elif queries and not (
+ isinstance(queries, list) and
+ all(isinstance(query, TextQuery) for query in queries)
+ ):
+ raise ValueError(f"'{data[0][0]}' can only contain TextQuery objects. '{data[0][0]}' contains: {queries}")
+
+ if not store_name in stores.keys():
+ raise ValueError(f"No store called '{store_name}'.")
+
+ results = stores[store_name].get_relevant_documents(queries=queries, top_k=top_k)
+
+ return ({self.outputs[0]: results}, parameters)
+```
+
+
+
+
+
+ReadByTransformers
+
+```python
+from typing import Dict, Any, List, Tuple
+
+from haystack.data import TextAnswer, Span
+from haystack import node
+
+
+@node
+class ReadByTransformers:
+ """
+ Simple dummy Transformers Reader.
+ Supports batch processing.
+ """
+ def __init__(self,
+ model_name_or_path: str,
+ default_top_k: int = 5,
+ default_no_answer: bool = True,
+ default_max_seq_len: int = 256,
+ default_doc_stride: int = 128,
+ default_batch_size: int = 16,
+ default_context_window_size: int = 70,
+ input_name: str = "documents_by_query",
+ output_name: str = "answers_by_query",
+ ):
+ self.model_name_or_path = model_name_or_path
+ self.default_top_k = default_top_k
+ self.default_no_answer = default_no_answer
+ self.default_max_seq_len = default_max_seq_len
+ self.default_doc_stride = default_doc_stride
+ self.default_batch_size = default_batch_size
+ self.default_context_window_size = default_context_window_size
+ self.model = None
+
+ self.init_parameters = {
+ "input_name": input_name,
+ "output_name": output_name,
+ "model_name_or_path": model_name_or_path,
+ "default_top_k": default_top_k,
+ "default_no_answer": default_no_answer,
+ "default_max_seq_len": default_max_seq_len,
+ "default_doc_stride": default_doc_stride,
+ "default_batch_size": default_batch_size,
+ "default_context_window_size": default_context_window_size,
+ }
+ self.inputs = [input_name]
+ self.outputs = [output_name]
+
+ def warm_up(self):
+ try:
+ from transformers import pipeline
+ except Exception as e:
+ raise ImportError("Can't import 'transformers': this node won't work.") from e
+
+ if not self.model:
+ self.model = pipeline(
+ "question-answering",
+ model=self.model_name_or_path,
+ )
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Any],
+ stores: Dict[str, Any],
+ ):
+ my_parameters = parameters.get(name, {})
+ top_k = my_parameters.get("top_k", self.default_top_k)
+ no_answer = my_parameters.get("no_answer", self.default_no_answer)
+ max_seq_len = my_parameters.get("max_seq_len", self.default_max_seq_len)
+ doc_stride = my_parameters.get("doc_stride", self.default_doc_stride)
+ batch_size = my_parameters.get("batch_size", self.default_batch_size)
+ context_window_size = my_parameters.get("context_window_size", self.default_context_window_size)
+
+ documents_for_queries = data[0][1]
+
+ inputs = []
+ for query, documents in documents_for_queries.items():
+ inputs.extend([
+ self.model.create_sample(question=query.content, context=doc.content) # type: ignore
+ for doc in documents
+ ])
+
+ # Inference
+ predictions = self.model( # type: ignore
+ inputs,
+ top_k=top_k,
+ handle_impossible_answer=no_answer,
+ max_seq_len=max_seq_len,
+ doc_stride=doc_stride,
+ batch_size=batch_size,
+ )
+
+ # Builds the TextAnswer object
+ answers_for_queries = {query: [] for query in documents_for_queries.keys()}
+ for query, documents in documents_for_queries.items():
+ documents = list(documents) # FIXME consume here the iterator for now
+ docs_len = len(documents)
+ relevant_predictions = predictions[:docs_len]
+ predictions = predictions[docs_len:]
+
+ for document, prediction in zip(documents, relevant_predictions):
+ if prediction.get("answer", None):
+ context_start = max(0, prediction["start"] - context_window_size)
+ context_end = min(len(document.content), prediction["end"] + context_window_size)
+ answers_for_queries[query].append(
+ TextAnswer(
+ content=prediction["answer"],
+ score=prediction["score"],
+ context=document.content[context_start:context_end],
+ offset_in_document=Span(start=prediction["start"], end=prediction["end"]),
+ offset_in_context=Span(start=prediction["start"] - context_start, end=prediction["end"] - context_start),
+ document_id=document.id,
+ meta=document.meta,
+ )
+ )
+ elif no_answer:
+ answers_for_queries[query].append(
+ TextAnswer(
+ content="",
+ score=prediction["score"],
+ meta=document.meta,
+ )
+ )
+ answers_for_queries[query] = sorted(answers_for_queries[query], reverse=True)[:top_k]
+ return ({self.outputs[0]: answers_for_queries}, parameters)
+```
+
+
+
+```python
+import json
+from pathlib import Path
+
+from haystack.pipeline import Pipeline
+from haystack.stores import MemoryDocumentStore
+from haystack.data import TextQuery, TextDocument
+from haystack.nodes import RetrieveByBM25, ReadByTransformers
+
+import logging
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+def test_pipeline():
+ document_store = MemoryDocumentStore()
+ document_store.write_documents([
+ TextDocument(content="My name is Anna and I live in Paris."),
+ TextDocument(content="My name is Serena and I live in Rome."),
+ TextDocument(content="My name is Julia and I live in Berlin."),
+ ])
+ pipeline = Pipeline()
+ pipeline.connect_store("my_documents", document_store)
+ pipeline.add_node("retriever", RetrieveByBM25(default_store="my_documents"))
+ pipeline.add_node("reader", ReadByTransformers(model_name_or_path="distilbert-base-uncased-distilled-squad"))
+
+ pipeline.connect(connect_from="retriever", connect_to="reader")
+ pipeline.draw("query_pipeline.png")
+
+ results = pipeline.run({"query": TextQuery(content="Who lives in Berlin?")})
+
+ print(results)
+
+
+if __name__ == "__main__":
+ test_pipeline()
+```
+Pipeline diagram:
+
+
+
+Output:
+```
+{
+ "answers_by_query": {
+ "TextQuery('Who lives in Berlin?')": [
+ "TextAnswer(id='bfe8d4e1fd43db24a61847a4efea7e4b', content='Julia', content_type='text', meta={}, id_hash_keys=['content'], score=0.9149983525276184, context='My name is Julia and I live in Berlin.', offset_in_document=Span(start=11, end=16), offset_in_context=Span(start=11, end=16), document_id='6afac5b39e0ddd125a7513b13fefe27d')"
+ ]
+ }
+}
+```
+
+**IMPORTANT NOTE:** This is an example meant to showcase the concept. However, it has a major flaw: it is coupling queries and documents tightly.
+
+This does not surface as an issue in this simple example, but it severely limits the reusability of Retrievers and Readers because they do not simply output documents anymore; they output a complex structure that is only accessible by them. We should do our best to decouple values as much as possible, so that other nodes can access only the specific output required for them to operate.
+
+In this specific example, we might imagine a multi-modal Retriever connected to several single-modality Readers. To make that work, we would need a DocumentClassifier, an intermediate node to route the documents by type to each Reader: in the current design, that won't be easy. If instead queries and documents were sent out in two separate lists, the task for DocumentClassifier would be far easier.
+
+This is a clear example of how good and careful design of nodes is going to be vital for the health of the framework, and it will force us to be way more strict around best practices and standards.
+
+# Detailed design
+
+This section focuses on the concept rather than the implementation strategy. For a discussion on the implementation, see the draft here: https://github.com/ZanSara/haystack-2.0-draft
+
+## The Pipeline API
+
+These are the core features that drove the design of the revised Pipeline API:
+
+- An execution graph that is more flexible than a DAG.
+- A clear place for `DocumentStore`s
+
+Therefore, the revised Pipeline object has the following API:
+
+- Core functions:
+ - `__init__(metadata=None)`: Creates an empty Pipeline. Note the `metadata` field, which can be used to carry metadata info. Such metadata might come useful in `Application`s (see below) to distinguish `Pipeline`s, tag them, describe them, etc.
+ - `run(data, parameters, debug)`: the core of the class. Relies on `networkx` for most of the heavy-lifting. Check out the implementation (https://github.com/ZanSara/haystack-2.0-draft/blob/main/new-haystack/haystack/pipeline/pipeline.py) for details: the code is heavily commented on the main loop and on the handling of non-trivial execution paths like branch selection, parallel branch execution, loops handling, multiple input/output and so on. For the effects of `debug`, see the dedicated section below.
+ - `draw(path)`: as in the old Pipeline object. Based on `pygraphviz` (which requires `graphviz`), but we might need to look for pure Python alternatives based on Matplotlib to reduce our dependencies.
+- Graph building:
+ - `add_node(name, node, parameters)`: adds a disconnected node to the graph. It expects Haystack nodes in the `node` parameter and will fail if they aren't respecting the contract. See below for a more detailed discussion of the Nodes' contract.
+ - `get_node(name)`: returns the node's information stored in the graph
+ - `connect(input_node, output_node)`: connects nodes together. It will fail if the nodes inputs and outputs do not match: see the Nodes' contract to understand how Nodes can declare their I/O.
+- Docstore management:
+ - `add_store(name, store)`: adds a DocumentStore to the stores that are passed down to the nodes through the `stores` variable.
+ - `list_stores()`: returns all connected stores.
+ - `get_store(name)`: returns a specific document store by name.
+
+Example pipeline topologies supported by the new implementation (images taken from the test suite):
+
+
+Merging pipeline
+
+
+
+In this pipeline, several nodes send their input into a single output node. Note that this pipeline has several starting nodes, something that is currently not supported by Haystack's `Pipeline`.
+
+
+
+
+Branching pipeline with branch skipping
+
+
+
+In this pipeline, only one edge will run depending on the decision taken by the `remainder` node. Note that this pipeline has several terminal nodes, something that is currently not supported by Haystack's `Pipeline`.
+
+
+
+
+Branching pipeline with parallel branch execution
+
+
+
+In this pipeline, all the edges that leave `enumerate` are run by `Pipeline`. Note that this usecase is currently not supported by Haystack's `Pipeline`.
+
+
+
+
+Branching pipeline with branch skipping and merge
+
+
+
+In this pipeline, the merge node can understand that some of its upstream nodes will never run (`remainder` selects only one output edge) and waits only for the inputs that it can receive, so one from `remainder`, plus `no-op`.
+
+
+
+
+Looping pipeline
+
+
+
+This is a pipeline with a loop and a counter that statefully counts how many times it has been called.
+
+Note that the new `Pipeline` can set a maximum number of allowed visits to nodes, so that loops are eventually stopped if they get stuck.
+
+
+
+
+Looping pipeline with merge
+
+
+
+This is a pipeline with a loop and a counter that statefully counts how many times it has been called. There is also a merge node at the bottom, which shows how Pipeline can wait for the entire loop to exit before running `sum`.
+
+
+
+
+Arbitrarily complex pipeline
+
+
+
+This is an example of the complex Pipelines that the new objects can support. This pipeline combines all cases above:
+- Multiple inputs
+- Multiple outputs
+- Decision nodes and branches skipped due to a selection
+- Distribution nodes and branches executed in parallel
+- Merge nodes where it's unclear how many edges will actually carry values
+- Merge nodes with repeated inputs (`sum` takes three `value` edges) or distinct inputs (`diff` takes `value` and `sum`)
+- Loops along a branch
+
+
+NOTE: the draft implementation supports all of these topologies already. You can find the code for each of these pipelines under https://github.com/ZanSara/new-haystack-pipeline-draft/tree/main/new-haystack/tests/integration
+
+## The Node contract
+
+A Haystack node is any class that abides the following contract:
+
+```python
+# This decorator does very little, but is necessary for Pipelines to recognize
+# this class as a Haystack node. Check its implementation for details.
+@node
+class MyNode:
+
+ def __init__(self, model_name: str = "deepset-ai/a-model-name"):
+ """
+ Haystack nodes should have an `__init__` method where they define:
+
+ - `self.inputs = []`:
+ A list with all the edges they can possibly receive input from
+
+ - `self.outputs = []`:
+ A list with the edges they might possibly produce as output
+
+ - `self.init_parameters = {}`:
+ Any state they wish to be persisted in their YAML serialization.
+ These values will be given to the `__init__` method of a new instance
+ when the pipeline is deserialized.
+
+ The `__init__` must be extremely lightweight, because it's a frequent
+ operation during the construction and validation of the pipeline. If a node
+ has some heavy state to initialize (models, backends, etc...) refer to the
+ `warm_up()` method.
+ """
+ # Lightweight state can be initialized here, for example storing the model name
+ # to be loaded later. See self.warm_up()
+ self.model = None
+ self.model_name = model_name
+ self.how_many_times_have_I_been_called = 0
+
+ # Contract - all three are mandatory.
+ self.init_parameters = {"model_name": model_name}
+ self.inputs = ["input_name"]
+ self.outputs = ["output_name"]
+
+ def warm_up(self):
+ """
+ Optional method.
+
+ This method is called by Pipeline before the graph execution.
+ Make sure to avoid double-initializations, because Pipeline will not keep
+ track of which nodes it called `warm_up` on.
+ """
+ if not self.model:
+ self.model = AutoModel.load_from_pretrained(self.model_name)
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Any],
+ stores: Dict[str, Any],
+ ):
+ """
+ Mandatory method.
+
+ This is the method where the main functionality of the node should be carried out.
+ It's called by `Pipeline.run()`, which passes the following parameters to it:
+
+ - `name: str`: the name of the node. Allows the node to find its own parameters in the `parameters` dictionary (see below).
+
+ - `data: List[Tuple[str, Any]]`: the input data.
+ Pipeline guarantees that the following assert always passes: `assert self.inputs == [name for name, value in data]`,
+ which means that:
+ - `data` is of the same length as `self.inputs`.
+ - `data` contains one tuple for each string stored in `self.inputs`.
+ - no guarantee is given on the values of these tuples: notably, if there was a decision node upstream, some values might be `None`.
+ For example, if a node declares `self.inputs = ["value", "value"]` (think of a Sum node), `data` might look like:
+ - `[("value", 1), ("value", 10)]`
+ - `[("value", None), ("value", 10)]`
+ - `[("value", None), ("value", None)]`, or even
+ - `[("value", 1), ("value", ["something", "unexpected"])]`
+ but it will never look like:
+ - `[("value", 1), ("value", 10), ("value", 100)]`,
+ - `[("value", 15)]` or
+ - `[("value", 15), ("unexpected", 10)]`.
+
+ - `parameters: Dict[str, Dict[str, Any]]`: a dictionary of dictionaries with all the parameters for all nodes.
+ Note that all nodes have access to all parameters for all other nodes: this might come handy to nodes like `Agent`s, that
+ want to influence the behavior of nodes downstream.
+ Nodes can access their own parameters using `name`, but they must not assume their name is present in the dictionary.
+ Therefore the best way to get the parameters is with `my_parameters = parameters.get(name, {})`
+
+ - `stores`: a dictionary of all the (Document)Stores connected to this pipeline.
+
+ Pipeline expects the output of this function to be a tuple in the following format:
+
+ `({edge: value for each output edge}, parameters)`
+
+ Which means that:
+ - Nodes are not forced to produce output on all the expected outputs: for example nodes taking a decision, like classifiers,
+ can produce output on a subset of the expected output edges and Pipeline will figure out the rest.
+ - Nodes must not add any key in the data dictionary that is not present in `self.outputs`,
+ - Nodes can alter the content of `parameters` and their changes will be propagated downstream.
+ """
+ self.how_many_times_have_I_been_called += 1
+
+ value = data[0][1]
+ print(f"Hello I'm {name}! This instance has been called {self.how_many_times_have_I_been_called} times and this is the value I received: {value}")
+
+ return ({self.outputs[0]: value}, parameters)
+```
+
+This contract is stored in the docstring of `@node` and acts as the single source of truth.
+
+Note how there are a few fundamental shifts from the old concept of pipelines and nodes:
+
+- There's no base class for nodes anymore. A node is whatever object is decorated with `@node`.
+
+- Pipelines can now be validated by instantiating them, and instantiation is going to be very fast, because the nodes are supposed to defer heavy operations to the `warm_up()` method. This does not prevent us from having shallow validation with a schema, but gives us more room for other validation strategies too.
+
+- Edges are designed to carry one value, be it a primitive like an `int` or a complex dictionary or an object. We have no more "fat edges" by default that carry complex output: we can now safely output several values on several edges and, if need be, connect multiple edges between the same two nodes.
+ - For example, if `node_a` outputs `value_1` and `value_2`, both these edges can be given to a `sum` node.
+
+- There's no more naming ambiguity between nodes and components.
+
+### Nodes discovery logic
+
+When pipelines are loaded from YAML, Pipeline needs to find the class definitions somewhere in the imported modules. Currently, at initialization `Pipeline` looks for classes that are decorated with the `@node` decorator under `haystack`; however, such a search can be extended (or narrowed) by setting the `search_nodes_in` init parameter of `Pipeline`. Note that it will try to import any module that is not imported yet.
+
+Search might fail in narrow corner cases: for example, inner classes are not discovered (often the case in tests). For these scenarios, `Pipeline` also accepts an `extra_nodes` init parameter that allows users to explicitly provide a dictionary of nodes to merge with the other discovered nodes.
+
+Name collisions are handled by prefixing the node name with the name of the module it was imported from.
+
+## Validation
+
+Pipeline performs validation on the edge name level: when calling `Pipeline.connect()`, it uses the values of the nodes' `self.inputs` and `self.outputs` to make sure that the connection is possible.
+
+Nodes are required, by contract, to explicitly define their inputs and outputs, and these values are used by the `connect` method to validate the connection, and by the `run` method to route values.
+
+For example, let's imagine we have two nodes with the following I/O declared:
+
+```python
+@node
+class NodeA:
+
+ def __init__(self):
+ self.inputs = ["input"]
+ self.outputs = ["intermediate_value"]
+
+ def run(self):
+ pass
+
+@node
+class NodeB:
+
+ def __init__(self):
+ self.inputs = ["intermediate_value"]
+ self.outputs = ["output"]
+
+ def run(self):
+ pass
+```
+
+This is the behavior of `Pipeline.connect()`:
+
+```python
+pipeline.connect('node_a', 'node_b')
+# Succeeds: no output
+
+pipeline.connect('node_a', 'node_a')
+# Traceback (most recent call last):
+# File "/home/sara/work/haystack-2/example.py", line 29, in
+# pipeline.connect('node_a', 'node_a')
+# File "/home/sara/work/haystack-2/new-haystack/haystack/pipeline/pipeline.py", line 224, in connect
+# raise PipelineConnectError(
+# haystack.pipeline._utils.PipelineConnectError: Cannot connect 'node_a' with 'node_a' with an edge named 'intermediate_value': their declared inputs and outputs do not match.
+# Upstream node 'node_a' declared these outputs:
+# - intermediate_value (free)
+# Downstream node 'node_a' declared these inputs:
+# - input (free)
+
+pipeline.connect('node_b', 'node_a')
+# Traceback (most recent call last):
+# File "/home/sara/work/haystack-2/example.py", line 29, in
+# pipeline.connect('node_b', 'node_a')
+# File "/home/sara/work/haystack-2/new-haystack/haystack/pipeline/pipeline.py", line 224, in connect
+# raise PipelineConnectError(
+# haystack.pipeline._utils.PipelineConnectError: Cannot connect 'node_b' with 'node_a' with an edge named 'output': their declared inputs and outputs do not match.
+# Upstream node 'node_b' declared these outputs:
+# - output (free)
+# Downstream node 'node_a' declared these inputs:
+# - input (free)
+```
+
+This type of error reporting was found especially useful for nodes that declare a variable number and name of inputs and outputs depending on their initialization parameters (think of language classifiers, for example).
+
+One shortcoming is that currently Pipeline "trusts" the nodes to respect their own declarations. So if a node states that it will output `intermediate_value`, but outputs something else once run, `Pipeline` will fail. We accept this failure as a "contract breach": the node should fix its behavior and `Pipeline` should not try to prevent such scenarios.
+
+Note: the draft implementation does not validate the type of the values, but only their names. So two nodes might agree to pass a variable called `documents` to each other, but one might output a `Set` when the receiver expects a `List`, and that will cause a crash. However, such check will be added.
+
+Other features planned for addition are:
+- automatically connect nodes if matching edges are found, even if the match is partial
+- double check for unconnected nodes and edges
+
+### Parameters hierarchy
+
+Parameters can be passed to nodes at several stages, and they have different priorities. Here they're listed from least priority to top priority.
+
+1. **Node's default `__init__` parameters**: nodes's `__init__` can provide defaults. Those are used only if no other parameters are passed at any stage.
+2. **Node's `__init__` parameters**: at initialization, nodes might be given values for their parameters. These are stored within the node instance and, if the instance is reused in the pipeline several times, they will be the same on all of them
+3. **Pipeline's `add_node()`**: When added to the pipeline, users can specify some parameters that have to be given only to that node specifically. They will override the node instance's parameters, but they will be applied only in that specific location of the pipeline and not be applied to other instances of the same node anywhere else in the graph.
+4. **Pipeline's `run()`**: `run()` also accepts a dictionary of parameters that will override all conflicting parameters set at any level below, quite like Pipeline does today.
+
+Example:
+
+```python
+
+class Node:
+ def __init__(self, value_1: int = 1, value_2: int = 1, value_3: int = 1, value_4: int = 1):
+ ...
+
+node = Node(value_2=2, value_3=2, value_4=2)
+pipeline = Pipeline()
+pipeline.add_node("node", node, parameters={"value_3": 3, "value_4": 3})
+...
+pipeline.run(data={...}, parameters={"node": {"value_4": 4}})
+
+# Node will receive {"value_1": 1, "value_2": 2, "value_3": 3,"value_4": 4}
+```
+
+### Debug mode
+
+The strategy towards debugging:
+
+- Pipelines are aware of debug mode, Nodes aren't.
+- Pipeline will produce debug output entirely through machine-readable logs.
+
+This implies that nodes don't have any **standardized** way to tell whether they're being debugged (while nothing prevents them from accepting a `debug` parameter if need be), and that they don't have any **standardized** way to output debug info except from logs (while nothing prevents them from supporting a `debug` edge if need be).
+
+We took this decision to encourage nodes to implement the same behavior regardless of debug mode, and to incentivize the use of machine-readable logs.
+
+### Evaluation
+
+Evaluation of Pipelines is a topic too wide for the scope of this proposal, so it has been left out on purpose. We will open a separate proposal after this one has been approved.
+
+### Pipeline Serialization
+
+_(Disclaimer: no draft implementation available yet)_
+
+We decide to remove the possibility of serializing single `Pipeline`s and to defer such task to a dedicated marshalling utility. This decision was made to remove the current ambiguity of `Pipeline` YAMLs being able to store several `Pipeline`s, while `Pipeline.save_to_yaml()` can only save one.
+
+In code, saving and loading pipelines will look like this:
+
+```python
+from haystack.pipelines import Pipeline, save_pipelines, load_pipelines
+
+query_pipeline = Pipeline()
+indexing_pipeline = Pipeline()
+# .. assemble the pipelines ...
+
+# Save the pipelines
+save_pipelines(
+ pipelines={
+ "query": query_pipeline,
+ "indexing": indexing_pipeline,
+ },
+ path="my_pipelines.json",
+ writer=json.dumps
+)
+
+# Load the pipelines
+new_pipelines = load_pipelines(
+ path="my_pipelines.json",
+ reader=json.loads
+)
+
+assert new_pipelines["query"] == query_pipeline
+assert new_pipelines["indexing"] == indexing_pipeline
+```
+
+Note how the save/load functions accept a writer/reader function: this choice frees us from committing strongly to a specific template language, and although a default will be set (be it YAML, TOML, HCL or anything else) the decision can be overridden by passing another explicit reader/writer function to the `save_pipelines`/`load_pipelines` functions.
+
+This is how the resulting file will look like, assuming a JSON writer was chosen.
+
+`my_pipeline.json`
+
+```json
+{
+ # A list of "dependencies" for the application.
+ # Used to ensure all external nodes are present when loading.
+ "dependencies" : [
+ "haystack == 2.0.0",
+ "my_custom_node_module == 0.0.1",
+ ],
+
+ # Stores are defined here, outside single pipeline graphs.
+ # All pipelines have access to all these docstores.
+ "stores": {
+ # Nodes will be able to access them by the name defined here,
+ # in this case `my_first_store` (see the retrievers below).
+ "my_first_store": {
+ # class_name is mandatory
+ "class_name": "InMemoryDocumentStore",
+ # Then come all the additional parameters for the store
+ "use_bm25": true
+ },
+ "my_second_store": {
+ "class_name": "InMemoryDocumentStore",
+ "use_bm25": false
+ }
+ },
+
+ # Nodes are defined here, outside single pipeline graphs as well.
+ # All pipelines can use these nodes. Instances are re-used across
+ # Pipelines if they happen to share a node.
+ "nodes": {
+ # In order to reuse an instance across multiple nodes, instead
+ # of a `class_name` there should be a pointer to another node.
+ "my_sparse_retriever": {
+ # class_name is mandatory, unless it's a pointer to another node.
+ "class_name": "BM25Retriever",
+ # Then come all the additional init parameters for the node
+ "store_name": "my_first_store",
+ "top_k": 5
+ },
+ "my_dense_retriever": {
+ "class_name": "EmbeddingRetriever",
+ "model_name": "deepset-ai/a-model-name",
+ "store_name": "my_second_store",
+ "top_k": 5
+ },
+ "my_ranker": {
+ "class_name": "Ranker",
+ "inputs": ["documents", "documents"],
+ "outputs": ["documents"],
+ },
+ "my_reader": {
+ "class_name": "Reader",
+ "model_name": "deepset-ai/another-model-name",
+ "top_k": 3
+ }
+ },
+
+ # Pipelines are defined here. They can reference all nodes above.
+ # All pipelines will get access to all docstores
+ "pipelines": {
+ "sparse_question_answering": {
+ # Mandatory list of edges. Same syntax as for `Pipeline.connect()`
+ "edges": [
+ ("my_sparse_retriever", ["reader"])
+ ],
+ # To pass some parameters at the `Pipeline.add_node()` stage, add them here.
+ "parameters": {
+ "my_sparse_retriever": {
+ "top_k": 10
+ }
+ },
+ # Metadata can be very valuable for dC and to organize larger Applications
+ "metadata": {
+ "type": "question_answering",
+ "description": "A test pipeline to evaluate Sparse QA.",
+ "author": "ZanSara"
+ },
+ # Other `Pipeline.__init__()` parameters
+ "max_allowed_loops": 10,
+ },
+ "dense_question_answering": {
+ "edges": [
+ ("my_dense_retriever", ["reader"])
+ ],
+ "metadata": {
+ "type": "question_answering",
+ "description": "A test pipeline to evaluate Sparse QA.",
+ "author": "an_intern"
+ }
+ },
+ "hybrid_question_answering": {
+ "edges": [
+ ("my_sparse_retriever", ["ranker"]),
+ ("my_dense_retriever", ["ranker"]),
+ ("ranker", ["reader"]),
+ ],
+ "metadata": {
+ "type": "question_answering",
+ "description": "A test pipeline to evaluate Hybrid QA.",
+ "author": "the_boss"
+ }
+ }
+ }
+}
+```
+
+# Open questions
+
+None left.
+
+# Drawbacks
+
+There are a number of drawbacks about the proposed approach:
+
+- Migration is going to be far from straightforward for us. Although many nodes can probably work with minor adaptations into the new system, it would be beneficial for most of them to be reduced to their `run()` method, especially indexing nodes. This means that nodes need, at least, to be migrated one by one to the new system and code copied over.
+
+- Migration is going to be far from straightforward for the users: see "Adoption strategy".
+
+- This system allows for pipelines with more complex topologies, which brings the risk of more corner cases. `Pipeline.run()` must be made very solid in order to avoid this scenario.
+
+- Nodes might break more easily while running due to unexpected inputs. While well designed nodes should internally check and deal with such situations, we might face a larger amount of bugs due to our failure to notice the lack of checks at review time.
+
+- The entire system works on the assumption that nodes are well behaving and "polite" to other nodes, for example not touching their parameters unless necessary, etc. Malicious or otherwise "rude" nodes can wreak havoc in `Pipeline`s by messing with other nodes' parameters.
+
+## Known limitations
+
+- **Reusability of nodes across Pipelines in REST API.** Currently, REST API are designed in such a way that a separate worker is spawned for each pipeline deployed. That makes sharing node instances across them a non-starter. However, we believe this specific limitation can be addressed by a different approach to the problem, like splitting pipelines in a way that shared nodes are stored in a dedicated sub-pipeline and so on. We postpone addressing this problem when it arises, as we don't consider it blocking and workarounds can be found.
+
+# Adoption strategy
+
+Old and new `Pipeline` and nodes are going to be fully incompatible. We must provide a migration script that can convert their existing pipeline YAMLs into the new ones.
+
+This proposal is best thought as part of the design of Haystack 2.0.
+
+## Rollout process
+
+These changes are going to be released with Haystack 1.x in a hidden internal package called `haystack.v2.pipelines`, and won't be promoted straight away.
+
+We will progressively add nodes to this `haystack.v2` package and build a folder structure under it (`haystack.v2.nodes`, `haystack.v2.stores`, ...) version after version, until we believe the content of the package is usable. Documentation will be built in parallel and we will progressively start pushing users towards the 2.0 API.
+Power users like dC and other Haystack experts will be able to test out these changes from the start and provide feedback while still in Haystack 1.x.
+
+Once we're confident that the v2 version covers all of Haystack v1.x usecases, Haystack 2.0 will be released and the packages are going to be switched: the content of `haystack` will be moved into `haystack.v1` and deprecated, and the content of `haystack.v2` will be moved under `haystack`. A few 2.x versions later, `haystack.v1` will then be dropped.
diff --git a/testbed/deepset-ai__haystack/proposals/text/4370-documentstores-and-retrievers.md b/testbed/deepset-ai__haystack/proposals/text/4370-documentstores-and-retrievers.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf2b985bc28b5c1cfdd5bbefece8475ed1e8697b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/4370-documentstores-and-retrievers.md
@@ -0,0 +1,239 @@
+- Title: `DocumentStores` and `Retrievers`
+- Decision driver: @ZanSara
+- Start Date: 2023-03-09
+- Proposal PR: 4370
+- Github Issue or Discussion: (only if available, link the original request for this change)
+
+# Summary
+
+Haystack's Document Stores are a very central component in Haystack and, as the name suggests, they were initially designed around the concept of `Document`.
+
+As the framework grew, so did the number of Document Stores and their API, until the point where keeping them aligned on the same feature set started to become a serious challenge.
+
+In this proposal we outline a reviewed design of the same concept.
+
+Note: these stores are designed to work **only** alongside Haystack 2.0 Pipelines (see https://github.com/deepset-ai/haystack/pull/4284)
+
+# Motivation
+
+Current `DocumentStore` face several issues mostly due to their organic growth. Some of them are:
+
+- `DocumentStore`s perform the bulk of retrieval, but they need to be tightly coupled to a `Retriever` object to work. We believe this coupling can be broken by a clear API boundary between `DocumentStores`, `Retriever`s and `Embedder`s. In this PR we focus on decoupling them.
+
+- `DocumentStore`s tend to bring in complex dependencies, so less used stores should be easy to decouple into external packages at need.
+
+# Basic example
+
+Stores will have to follow a contract rather than subclassing a base class. We define a contract for `DocumentStore` that defines a very simple CRUD API for Documents. Then, we provide one implementation for each underlying technology (`MemoryDocumentStore`, `ElasticsearchDocumentStore`, `FaissDocumentStore`) that respects such contract.
+
+Once stores are defined, we will create one `Retriever` for each `DocumentStore`. Such retrievers are going to be highly specialized nodes that expect one specific document store and can handle all its specific requirements without being bound to a generic interface.
+
+For example, this is how embedding-based retrieval would look like:
+
+```python
+from haystack import Pipeline
+from haystack.nodes import (
+ TxtConverter,
+ PreProcessor,
+ DocumentWriter,
+ DocumentEmbedder,
+ StringEmbedder,
+ MemoryRetriever,
+ Reader,
+)
+from haystack.document_stores import MemoryDocumentStore
+
+docstore = MemoryDocumentStore()
+
+indexing_pipe = Pipeline()
+indexing_pipe.add_store("document_store", docstore)
+indexing_pipe.add_node("txt_converter", TxtConverter())
+indexing_pipe.add_node("preprocessor", PreProcessor())
+indexing_pipe.add_node("embedder", DocumentEmbedder(model_name="deepset/model-name"))
+indexing_pipe.add_node("writer", DocumentWriter(store="document_store"))
+indexing_pipe.connect("txt_converter", "preprocessor")
+indexing_pipe.connect("preprocessor", "embedder")
+indexing_pipe.connect("embedder", "writer")
+
+indexing_pipe.run(...)
+
+query_pipe = Pipeline()
+query_pipe.add_store("document_store", docstore)
+query_pipe.add_node("embedder", StringEmbedder(model_name="deepset/model-name"))
+query_pipe.add_node("retriever", MemoryRetriever(store="document_store", retrieval_method="embedding"))
+query_pipe.add_node("reader", Reader(model_name="deepset/model-name"))
+query_pipe.connect("embedder", "retriever")
+query_pipe.connect("retriever", "reader")
+
+results = query_pipe.run(...)
+```
+
+Note a few key differences with the existing Haystack process:
+
+- During indexing we do not use any `Retriever`, but rather a `DocumentEmbedder`. This class accepts a model name and simply adds embeddings to the `Document`s it receives.
+
+- We used an explicit `DocumentWriter` node instead of adding the `DocumentStore` at the end of the pipeline. That node will be generic for any document store, because the `DocumentStore` contract declares a `write_documents` method (see "Detailed Design").
+
+- During query, the first step is not a `Retriever` anymore, but a `StringEmbedder`. Such node will convert the query into its embedding representation and forward it over to a `Retriever` that expects it. In this case, an imaginary `MemoryRetriever` can be configured to expect an embedding by setting the `retrieval_method` flag to `embedding`.
+
+
+# Detailed design
+
+## `DocumentStore` contract
+
+Here is a summary of the basic contract that all `DocumentStore`s are expected to follow.
+
+```python
+class MyDocumentStore:
+
+ def count_documents(self, **kwargs) -> int:
+ ...
+
+ def filter_documents(self, filters: Dict[str, Any], **kwargs) -> List[Document]:
+ ...
+
+ def write_documents(self, documents: List[Document], **kwargs) -> None:
+ ...
+
+ def delete_documents(self, ids: List[str], **kwargs) -> None:
+ ...
+```
+
+The contract is quite narrow to encourage the use of specialized nodes. `DocumentStore`s' primary focus should be storing documents: the fact that most vector stores also support retrieval should be outside of this abstraction and made available through methods that do not belong to the contract. This allows `Retriever`s to carry out their tasks while avoiding clutter on `DocumentStore`s that do not support some features.
+
+Note also how the concept of `index` is not present anymore, as it is mostly ES-specific.
+
+For example, a `MemoryDocumentStore` could offer the following API:
+
+```python
+class MemoryDocumentStore:
+
+ def filter_documents(self, filters: Dict[str, Any], **kwargs) -> List[Document]:
+ ...
+
+ def write_documents(self, documents: List[Document], **kwargs) -> None:
+ ...
+
+ def delete_documents(self, ids: List[str], **kwargs) -> None:
+ ...
+
+ def bm25_retrieval(
+ self,
+ queries: List[str], # Note: takes strings!
+ filters: Optional[Dict[str, Any]] = None,
+ top_k: int = 10
+ ) -> List[List[Document]]:
+ ...
+
+ def vector_similarity_retrieval(
+ self,
+ queries: List[np.array], # Note: takes embeddings!
+ filters: Optional[Dict[str, Any]] = None,
+ top_k: int = 10
+ ) -> List[List[Document]]:
+ ...
+
+ def knn_retrieval(
+ self,
+ queries: List[np.array], # Note: takes embeddings!
+ filters: Optional[Dict[str, Any]] = None,
+ top_k: int = 10
+ ) -> List[List[Document]]:
+ ...
+```
+
+In this way, a `DocumentWriter` could easily use the `write_documents` method defined in the contract on all document stores, while `MemoryRetriever` can leverage the fact that it only supports `MemoryDocumentStore`, so it can assume all its custom methods like `bm25_retrieval`, `vector_similarity_retrieval`, etc... are present.
+
+Here is, for comparison, an example implementation of a `DocumentWriter`, a document-store agnostic node.
+
+```python
+@node
+class DocumentWriter:
+
+ def __init__(self, inputs=['documents'], stores=["documents"]):
+ self.store_names = stores
+ self.inputs = inputs
+ self.outputs = []
+ self.init_parameters = {"inputs": inputs, "stores": stores}
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Dict[str, Any]]
+ ) -> Dict[str, Any]:
+ writer_parameters = parameters.get(name, {})
+ stores = writer_parameters.pop("stores", {})
+
+ all_documents = []
+ for _, documents in data:
+ all_documents += documents
+
+ for store_name in self.store_names:
+ stores[store_name].write_documents(documents=all_documents, **writer_parameters)
+
+ return ({}, parameters)
+```
+This class does not check which document store it is using, because it can safely assume they are going to have a `write_documents` method.
+
+Here instead we can see an example implementation of a `MemoryRetriever`, a document-store aware node.
+
+```python
+@node
+class MemoryRetriever:
+
+ def __init__(self, inputs=['query'], output="documents", stores=["documents"]):
+ self.store_names = stores
+ self.inputs = inputs
+ self.outputs = [output]
+ self.init_parameters = {"inputs": inputs, "output": output, "stores": stores}
+
+ def run(
+ self,
+ name: str,
+ data: List[Tuple[str, Any]],
+ parameters: Dict[str, Dict[str, Any]]
+ ) -> Dict[str, Any]:
+
+ retriever_parameters = parameters.get(name, {})
+ stores = retriever_parameters.pop("stores", {})
+ retrieval_method = retriever_parameters.pop("retrieval_method", "bm25")
+
+ for store_name in self.store_names:
+ if not isinstance(stores[store_name], MemoryStore):
+ raise ValueError("MemoryRetriever only works with MemoryDocumentStore.")
+
+ if retrieval_method == "bm25":
+ documents = stores[store_name].bm25_retrieval(queries=queries, **retriever_parameters)
+ elif retrieval_method == "embedding":
+ documents = stores[store_name].vector_similarity_retrieval(queries=queries, **retriever_parameters)
+ ...
+
+ return ({self.outputs[0]: documents}, parameters)
+```
+
+Note how `MemoryRetriever` is making use of methods that are not specified in the contract and therefore has to check that the document store it has been connected to is a proper one.
+
+# Drawbacks
+
+### Migration effort
+
+We will need to migrate all `DocumentStore`s and heavily cut their API. Although it is going to be a massive undertaking, this process will allow us to drop less used `DocumentStore` backends and focus on the most important ones. It will also highly reduce the code we have to maintain.
+
+We will also need to re-implement the entire Retrieval stack. We believe a lot of code could be reused, but we will focus on leveraging each document store's facilities a lot more, and that will require almost complete rewrites. The upside is that the resulting code should be several times shorter, so the maintenance burden should be limited.
+
+# Alternatives
+
+We could force support for the old Docstores into the new Pipelines, but I see no value in such effort given that with the same investment we can get a massively smaller codebase.
+
+# Adoption strategy
+
+This proposal is part of the Haystack 2.0 rollout strategy. See https://github.com/deepset-ai/haystack/pull/4284.
+
+# How we teach this
+
+Documentation is going to be crucial, as much as tutorials and demos. We plan to start working on those as soon as basic nodes (one reader and one retriever) are added to Haystack v2 and `MemoryDocumentStore` receives its first implementation.
+
+# Open questions
+
+- We should enable validation of `DocumentStore`s for nodes that are document-store aware. It could be done by an additional `validation` method with relative ease, but it's currently not mentioned in the node/pipeline contract.
diff --git a/testbed/deepset-ai__haystack/proposals/text/5289-recentness-ranker.md b/testbed/deepset-ai__haystack/proposals/text/5289-recentness-ranker.md
new file mode 100644
index 0000000000000000000000000000000000000000..b98b1224dd3a3b3996f6f655c3a737a7e4bfa079
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5289-recentness-ranker.md
@@ -0,0 +1,185 @@
+- Title: Addition of a RecentnessRanker node
+- Decision driver: @elundaeva
+- Start Date: 2023-07-05
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/5289
+- Github Issue or Discussion: some links available in the "Detailed design" section below
+
+# Summary
+
+This ranker allows retrieved documents to be sorted not only by relevance (the default) but also with recency factored in.
+
+# Basic example
+
+The RecentnessRanker would be instantiated as follows:
+
+ ``` python
+
+ ranker = RecentnessRanker(
+ date_identifier="date",
+ weight="0.5",
+ top_k=3,
+ method="reciprocal_rank_fusion",
+ )
+ ```
+
+And here is an example of how the node would work in the context of a generative QA pipeline:
+
+ ``` yaml
+
+version: '1.18.0'
+name: 'Example pipeline'
+
+components:
+- name: DocumentStore
+ type: DeepsetCloudDocumentStore
+- name: EmbeddingRetriever
+ type: EmbeddingRetriever
+ params:
+ document_store: DocumentStore
+ embedding_model: [embedding model here]
+ model_format: sentence_transformers
+ top_k: 30
+- name: BM25Retriever
+ type: BM25Retriever
+ params:
+ document_store: DocumentStore
+ top_k: 30
+- name: JoinDocuments
+ type: JoinDocuments
+ params:
+ top_k_join: 30
+ join_mode: reciprocal_rank_fusion
+- name: Ranker
+ type: SentenceTransformersRanker
+ params:
+ model_name_or_path: [cross-encoder model here]
+ top_k: 15
+- name: RecentnessRanker
+ type: RecentnessRanker
+ params:
+ date_identifier: release_date
+ top_k: 3
+ method: score
+- name: qa_template
+ type: PromptTemplate
+ params:
+ output_parser:
+ type: AnswerParser
+ prompt: "prompt text here"
+- name: PromptNode
+ type: PromptNode
+ params:
+ default_prompt_template: qa_template
+ max_length: 300
+ model_kwargs:
+ temperature: 0
+ model_name_or_path: gpt-3.5-turbo
+- name: FileTypeClassifier
+ type: FileTypeClassifier
+- name: TextConverter
+ type: TextConverter
+- name: PDFConverter
+ type: PDFToTextConverter
+- name: Preprocessor
+ params:
+ language: en
+ split_by: word
+ split_length: 200
+ split_overlap: 10
+ split_respect_sentence_boundary: true
+ type: PreProcessor
+
+pipelines:
+- name: query
+ nodes:
+ - name: EmbeddingRetriever
+ inputs: [Query]
+ - name: BM25Retriever
+ inputs: [Query]
+ - name: JoinDocuments
+ inputs: [EmbeddingRetriever, BM25Retriever]
+ - name: Ranker
+ inputs: [JoinDocuments]
+ - name: RecentnessRanker
+ inputs: [Ranker]
+ - name: PromptNode
+ inputs: [RecentnessRanker]
+
+- name: indexing
+ nodes:
+ - inputs:
+ - File
+ name: FileTypeClassifier
+ - inputs:
+ - FileTypeClassifier.output_1
+ name: TextConverter
+ - inputs:
+ - FileTypeClassifier.output_2
+ name: PDFConverter
+ - inputs:
+ - TextConverter
+ - PDFConverter
+ name: Preprocessor
+ - inputs:
+ - Preprocessor
+ name: EmbeddingRetriever
+ - inputs:
+ - EmbeddingRetriever
+ name: DocumentStore
+
+ ```
+
+# Motivation
+
+Initially this ranker was implemented by Timo for a customer case where the date of the document mattered for retrieval. The reason we would like to add it to Haystack is because we see wider use for this node in future customer and community cases. One example where document recency matters is in a QA solution based on technical documentation with release notes of a software product - the older release notes should naturally have less priority in the responses than the most recent ones. And another example is news content - news articles retrieval can definitely benefit from recency being factored into the relevance calculation.
+
+# Detailed design
+
+You can see the code for this proposal here: https://github.com/deepset-ai/haystack/pull/5301/files.
+
+As a general description, the ranker has the following parameters (date_identifier and method are required, the rest are optional):
+- date_identifier (string pointing to the date field in the metadata)
+- weight (the options are:
+ - 0.5 default, relevance and recency will have the same impact in the calculation;
+ - 0 only relevance will be considered for the calculation, so the RecentnessRanker is effectively disabled;
+ - 1 only recency will be considered for the calculation)
+- top_k (number of documents to return, works the same way as top-k in other rankers as well as retrievers)
+- method (the options are:
+ - "reciprocal_rank_fusion" which does not require any relevance score from the previous node;
+ - "score" requires a 0-1 relevance score provided from the previous node in the pipeline.
+ More information on method compatibility with different retrievers is in the Drawbacks section below)
+
+The RecentnessRanker works by:
+1. Adjusting the relevance score based on the chosen weight.
+ For the "reciprocal_rank_fusion" the calculation is rrf * (1 - weight). The rrf is calculated as 1 / (k + rank) where k=61 (see reasoning below).
+ And the "score" method performs the calculation as relevance score * (1 - weight).
+2. Adding to the relevance score the recentness score by:
+ For the "reciprocal_rank_fusion" - performing the rrf * weight calculation on the documents dictionary sorted by date where rrf is 1 / (k + rank), k=61.
+ For the "score" method - performing the recentness score * weight calculation where recentness score is (amount of documents - rank) / amount of documents.
+3. Returning top-k documents in the documents dictionary sorted by final score (relevance score + recentness score both adjusted by weight).
+
+k is set to 61 in reciprocal rank fusion based on a University of Waterloo paper (co-authored with Google) called "Reciprocal Rank Fusion outperforms Condorcet and individual Rank Learning Methods" [https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf] where k=60 was suggested, and 1 was added as python lists are 0-based and the paper used 1-based ranking.
+
+# Drawbacks
+
+Since this is a relatively small change without any effect on existing nodes, I do not see major reasons not to add this ranker. The only important limitation to using this node is the need to have a metadata field with document date already present.
+
+For the "score" method, you would also need to double-check that the previous node outputs a score within [0,1] range (e.g. CohereRanker, SentenceTransformersRanker, EmbeddingRetriever). With the "reciprocal_rank_fusion" method, you do not need to have the relevance score pre-calculated, so using this method allows combining RecentnessRanker with other retrieval nodes, like BM25 retriever.
+
+# Alternatives
+
+Without adding this feature it will not be possible to handle customer and community cases where recency of documents matters for the response, see examples in the Motivation section.
+
+# Adoption strategy
+
+This is not a breaking change and there does not seem to be any need for a migration script. Existing Haystack users can just start using this node on an as-needed basis in combination with existing retrieval options (sparse/dense/hybrid).
+
+# How we teach this
+
+A small change like this might not require creating a whole new tutorial (although it is of course up to you), although it can be interesting to discuss this ranker with example usage in blog post format like we have for metadata filtering (https://www.deepset.ai/blog/metadata-filtering-in-haystack).
+
+As for documentation needs, it would be good to add some info on how to use this recentness ranker - it can be added to the same page where the other existing rankers are explained. If you need help writing the documentation and/or the blog post/tutorial, please do not hesitate to reach out to me.
+
+# Unresolved questions
+
+Since it has already been implemented and is functional, there are not many known unresolved design questions. We just need to make sure that if/when the custom node is deprecated and we transition to using this node in Haystack, there are no disruptions to production pipelines that have been using this node and they get adjusted accordingly.
diff --git a/testbed/deepset-ai__haystack/proposals/text/5390-embedders.md b/testbed/deepset-ai__haystack/proposals/text/5390-embedders.md
new file mode 100644
index 0000000000000000000000000000000000000000..5314570f343d5402ce3ec3063240013662a8e546
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5390-embedders.md
@@ -0,0 +1,249 @@
+- Title: Embedders
+- Decision driver: @anakin87
+- Start Date: 2023-07-19
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/5390
+
+# Summary
+
+As decided in the previous proposals ([Embedding Retriever](3558-embedding_retriever.md) and [DocumentStores and Retrievers](4370-documentstores-and-retrievers.md)), in Haystack V2 we want to introduce a new component: the Embedder.
+
+**Separation of concerns**
+- DocumentStores: store the Documents, their metadata and representations (vectors); they offer a CRUD API.
+- Retrievers: retrieve Documents from the DocumentStores; they are specific and aware of the used Store (e.g., MemoryRetriever for the MemoryDocumentStore). They will be commonly used in query pipelines (not in indexing pipelines).
+- **Embedders**: encode a list of data points (strings, images, etc.) into a list of vectors (i.e., the embeddings) using a model. They are used both in indexing pipelines (to encode the Documents) and query pipelines (to encode the query).
+
+*In the current implementation, the Embedder is part of Retriever, which is unintuitive and comes with several disadvantages (explained in the previous proposals).*
+
+**This proposal aims to define the Embedder design.**
+
+# Basic example
+
+*This code snippet is merely an example and may not be completely up-to-date.*
+
+
+```python
+from haystack import Pipeline
+from haystack.components import (
+ TxtConverter,
+ PreProcessor,
+ DocumentWriter,
+ OpenAITextEmbedder,
+ OpenAIDocumentEmbedder,
+ MemoryRetriever,
+ Reader,
+)
+from haystack.document_stores import MemoryDocumentStore
+docstore = MemoryDocumentStore()
+
+indexing_pipe = Pipeline()
+indexing_pipe.add_store("document_store", docstore)
+indexing_pipe.add_node("txt_converter", TxtConverter())
+indexing_pipe.add_node("preprocessor", PreProcessor())
+indexing_pipe.add_node("embedder", OpenAIDocumentEmbedder(model_name="text-embedding-ada-002"))
+indexing_pipe.add_node("writer", DocumentWriter(store="document_store"))
+indexing_pipe.connect("txt_converter", "preprocessor")
+indexing_pipe.connect("preprocessor", "embedder")
+indexing_pipe.connect("embedder", "writer")
+
+indexing_pipe.run(...)
+
+query_pipe = Pipeline()
+query_pipe.add_store("document_store", docstore)
+query_pipe.add_node("embedder", OpenAITextEmbedder(model_name="text-embedding-ada-002"))
+query_pipe.add_node("retriever", MemoryRetriever(store="document_store", retrieval_method="embedding"))
+query_pipe.add_node("reader", Reader(model_name="deepset/model-name"))
+query_pipe.connect("embedder", "retriever")
+query_pipe.connect("retriever", "reader")
+
+results = query_pipe.run(...)
+```
+
+- The `OpenAITextEmbedder` uses OpenAI models to convert a list of strings into a list of vectors. It is used in the query pipeline to embed the query.
+- The `OpenAIDocumentEmbedder` uses OpenAI models to enrich a list of Documents with the corresponding vectors (stored in the `embedding` field). It is used in the indexing pipeline to embed the Documents.
+- The Retriever is no longer needed in the indexing pipeline.
+
+# Motivation
+
+The motivations behind this change were already provided in the previous proposals ([Embedding Retriever](3558-embedding_retriever.md) and [DocumentStores and Retrievers](4370-document_stores_and_retrievers.md)). Here is a summary:
+- Retrievers shouldn't be responsible for embedding Documents.
+- Currently, Retrievers have many parameters just to support and configure different underlying Encoders(≈Embedders).
+- Adding support for new embedding providers or strategies is difficult. It requires changing the Retriever code.
+
+# Detailed design
+
+## Handle queries and Documents
+This is the most critical aspect of the design.
+
+- When embedding queries, the Embedder component receives a list of strings in input that are transformed into a list of vectors returned as output.
+- When embedding documents, the Embedder component receives a list of `Document` objects in input; for each item in the list, the corresponding vectors are computed and stored in the `embedding` field of the item itself. The list is then returned as the component output.
+- When working with documents, there's the possibility to compute embeddings also for document's metadata. In this case, the Embedder will be responsible for performing any text-manipulation work needed in preparation of the actual embedding process.
+
+**Below, I will focus on the public API. The internal implementation is discussed in [Implementation details](#implementation-details).**
+
+```python
+@component
+class HFTextEmbedder:
+ ...
+
+ @component.output_types(result=List[np.ndarray])
+ def run(self, strings: List[str]):
+ ...
+ return {"result": list_of_computed_embeddings}
+
+
+@component
+class HFDocumentEmbedder:
+ ...
+
+ @component.output_types(result=List[Document])
+ def run(self, documents: List[Document]):
+ ...
+ return {"result": list_of_documents_with_embeddings}
+```
+
+## Different providers/strategies
+
+- We can define different embedder components depending on the models or services providing the actual embeddings: `OpenAIEmbedder`, `CohereEmbedder`, `HuggingFaceEmbedder`, `SentenceTransformersEmbedder`, etc.
+- Additionally, we could define different classes depending on the embedding strategy if necessary.
+While this is not a prominent use case, there are scenarios where [new strategies](https://github.com/deepset-ai/haystack/issues/5242) are introduced, requiring different libraries (`InstructorEmbedder`) or involving a different string preparation (`E5Embedder`). Supporting these scenarios with minimal effort would be nice.
+
+## Different models in the same embedding/retrieval task
+
+As you can observe from the [current implementation](https://github.com/deepset-ai/haystack/blob/main/haystack/nodes/retriever/dense.py), some embedding/retrieval tasks require the usage of different models.
+
+This is not the most popular approach today, compared to what we call Embedding Retrieval (based on a single model). But it still has some relevant applications.
+
+Some examples:
+- In Dense Passage Retrieval, you need a model to encode queries and another model to encode Documents
+- in the TableTextRetriever, we use 3 different models: one for queries, one for textual passages and one for tables
+- in Multimodal Retrieval, we can specify different models to encode queries and Documents
+
+Since the Embedder will not be included in the Retriever, it makes sense to have different Embedders, each one using a single model.
+
+```python
+dpr_query_embedder = SentenceTransformersTextEmbedder(model_name="facebook/dpr-question_encoder-single-nq-base")
+dpr_doc_embedder = SentenceTransformersDocumentEmbedder(model_name="facebook/dpr-ctx_encoder-single-nq-base")
+```
+
+## Implementation details
+
+*You can skip this section if you are primarily interested in user experience.*
+
+There has been much discussion on how to effectively implement this proposal.
+The most important aspects to consider:
+- we want different Embedders for queries and Documents as they require a different treatment
+- if the same model is internally used for different Embedders, we want to reuse the same instance in order to save memory
+
+On top of the embedder components we already discussed, we introduce one additional abstraction:
+an `EmbeddingBackend`, which is NOT a component, responsible for performing the actual embedding computation, implemented as a singleton class in order to reuse instances. It will live in a different package and will be hidden from the public API.
+```python
+@singleton # implementation is out of scope
+class HFEmbeddingBackend:
+ """
+ NOT A COMPONENT!
+ """
+ def __init__(self, model_name: str, ... init params ...):
+ """
+ init takes the minimum parameters needed at init time, not
+ the params needed at inference, so they're easier to reuse.
+ """
+ self.model = ...
+
+ def embed(self, data: str, ... inference params ... ) -> np.ndarray:
+ # compute embedding
+ return embedding
+
+
+class OpenAIEmbeddingBackend:
+ ... same as above ...
+```
+
+Implemented as singletons, when instantiating an EmbeddingBackend class, if another identical one exists, the existing one will be returned without allocating additional resources for a new one. This makes model reusability transparent, saving lots of memory without any user intervention.
+
+This is how an EmbeddingBackend would be used by a text embedder component:
+**Part of the public API**.
+```python
+@component
+class HFTextEmbedder:
+
+ def __init__(self, model_name: str, ... init params ...):
+ self.model_name = model_name
+ self.model_params = ... params ...
+
+ def warm_up(self):
+ self.embedding_backend = HFEmbeddingBackend(self.model_name, **self.model_params)
+
+ @component.output_types(result=List[np.ndarray])
+ def run(self, strings: List[str]):
+        return {"result": self.embedding_backend.embed(strings)}
+```
+
+Another example, using an embedder component expecting Documents:
+**Part of the public API**.
+
+```python
+@component
+class HFDocumentEmbedder:
+
+ def __init__(self, model_name: str, ... init params ...):
+ self.model_name = model_name
+ self.model_params = ... params ...
+
+ def warm_up(self):
+ self.embedding_backend = HFEmbeddingBackend(self.model_name, **self.model_params)
+
+ @component.output_types(result=List[Document])
+ def run(self, documents: List[Document]):
+        text_strings = [document.content for document in documents]
+        embeddings = self.embedding_backend.embed(text_strings)
+        documents_with_embeddings = [Document.from_dict({**doc.to_dict(), "embedding": emb}) for doc, emb in zip(documents, embeddings)]
+ return {"result": documents_with_embeddings}
+```
+
+# Drawbacks
+
+## Migration
+The drawbacks of separating Retrievers and Embedders were already discussed in [this proposal](https://github.com/deepset-ai/haystack/blob/main/proposals/text/4370-documentstores-and-retrievers.md) and mainly consist of **migration effort**.
+
+For example, if a user has indexed documents in the store and wants to update the embeddings using a different model instead, with the current Haystack implementation the user would run `document_store.update_embeddings(retriever)`.
+
+With the new Embedder design, I can imagine something similar (based on the MemoryDocumentStore v2 implementation):
+```python
+# get all the documents
+docs = memory_document_store.filter_documents()
+
+# compute the embedding with the new model
+new_embedder = HFDocumentEmbedder(model_name="new-model")
+docs_with_embeddings = new_embedder.run(documents=docs)
+
+# overwrite the documents
+memory_document_store.write_documents(documents=docs_with_embeddings, policy=DuplicatePolicy.OVERWRITE)
+```
+## Other aspects
+Regarding the design proposed in this document, there are some potential drawbacks to consider:
+ - Proliferation of classes (though they will be small and easy to maintain).
+ - Users need to know which models are appropriate for which task (e.g. embedding queries rather than embedding documents, see [Different models in the same embedding/retrieval task](#different-models-in-the-same-embeddingretrieval-task)). On the other hand, this approach is more explicit and will help making users aware of problems and tradeoffs related to the topic.
+
+# Alternatives
+
+Several alternatives to this design were considered. The main challenge was handling the differences between queries and Documents.
+Some ideas:
+- Have a single Embedder component for text (HFTextEmbedder instead of HFEmbeddingBackend, HFTextEmbedder and HFDocumentEmbedder) and adapt Documents before and after that, using other Components. --> Many components.
+- Make Embedders only work on Documents and represent the query as a Document. --> Unintuitive and require changes in the Retriever.
+- Create another primitive like Data (content + embedding) and use it for both queries and Documents. --> More conversion components like DataToDocument.
+- Have the DocumentEmbedder take a TextEmbedder as an input parameter. --> Fewer classes but serialization issues.
+
+# Adoption strategy
+
+This change will constitute a part of Haystack v2.
+
+# How we teach this
+
+Documentation and tutorials will be of fundamental importance.
+
+# Unresolved questions
+
+- Migration and refactoring of existing Encoders hidden in Retrievers.
+I prepared a table. Should it be shared here?
+- The migration and refactoring of TableTextRetriever require input and ownership from people involved in TableQA.
+- How to approach MultiModal Embedding? How many classes? Take into consideration that a query could also be an Image or a Table.
diff --git a/testbed/deepset-ai__haystack/proposals/text/5540-llm-support-2.0.md b/testbed/deepset-ai__haystack/proposals/text/5540-llm-support-2.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..f40dc604878ab32046ea20a265b7d0b1b9349de5
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5540-llm-support-2.0.md
@@ -0,0 +1,253 @@
+- Title: LLM Support in Haystack 2.0
+- Decision driver: @ZanSara
+- Start Date: 2023-08-10
+- Proposal PR: #5540
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/5330
+
+# Summary
+
+In this proposal we outline our migration strategy of the `PromptNode` stack of functionality into Haystack 2.0 components.
+
+This proposal, however, does not discuss Agents or Agent-related features and functionality, which are going to be addressed into a separate proposal.
+
+# Motivation
+
+This proposal is part of a larger effort of migrating existing Haystack 1.x components into their 2.0 counterpart.
+
+# Basic example
+
+In Haystack 2.0 components should be smaller than their 1.x counterpart. Therefore, we plan to split the functionality of `PromptNode` into a few smaller components.
+
+As this proposal concerns LLM support, not specifically Agents, the main usecase in question is RAG (Retrieval-Augmented Generation).
+
+## Example: RAG Pipeline
+
+Using the components discussed in the Detailed Design section, a Haystack 2.0 RAG pipeline may look like this:
+
+```mermaid
+graph TD;
+
+IN{IN} -- "questions (List[str])" --> Retriever
+IN{IN} -- "questions (List[str])" --> PromptBuilder
+Retriever -- "documents (List[List[Doc]])" --> PromptBuilder
+PromptBuilder -- "prompts (List[str])" --> GPT4Generator
+GPT4Generator -- "replies (List[List[str]])" --> RepliesToAnswersConverter
+RepliesToAnswersConverter -- "answers (List[List[Answer]])" --> OUT{OUT}
+```
+
+While the code for such pipeline may look like:
+
+```python
+from haystack.preview.components import MemoryRetriever, PromptBuilder, ChatGPTGenerator, RepliesToAnswersConverter
+from haystack.preview.document_stores import MemoryDocumentStore
+from haystack.preview.pipeline import Pipeline
+
+pipe = Pipeline()
+pipe.add_store("store", MemoryDocumentStore())
+pipe.add_component("retriever", MemoryRetriever(), store="store")
+pipe.add_component("prompt_builder", PromptBuilder("deepset/question-answering"))
+pipe.add_component("llm", GPT4Generator(api_key="..."))
+pipe.add_component("replies_converter", RepliesToAnswersConverter())
+
+pipe.connect("retriever", "prompt_builder")
+pipe.connect("prompt_builder", "llm")
+pipe.connect("llm", "replies_converter")
+
+questions = ["Why?", "Why not?"]
+results = pipe.run({
+ "retriever": {"queries": questions},
+ "prompt_builder": {"questions": questions},
+})
+
+assert results == {
+ "replies_converter": {
+ "answers": [[Answer("Because of this.")], [Answer("Because of that.")]]
+ }
+}
+```
+
+# Detailed design
+
+Haystack’s `PromptNode` is a very complex component that includes under its name several functionalities: loading prompt templates through the `PromptTemplate` class, rendering such prompt template with the variables from the invocation context, choosing which LLM backend to use, sending the prompt to the LLM using the correct invocation layer, interpreting the results, parsing them into objects, and putting them back in the pipeline in a way other components can understand.
+
+In Haystack 2.0 we unpack these functionalities into a few separate components, to clarify what is happening, how it works, and provide additional flexibility.
+
+The main functionalities we identified are the following:
+
+1. Fetching the prompt from different sources
+2. Rendering the prompt using variables
+3. Invoke the LLM
+4. Parse the output
+5. History/Memory management
+
+We leave the discussion about History/Memory to a separate proposal, as it concerns mostly Agents, and focus on the other points.
+
+## LLM invocation
+
+In Haystack 1.x, `PromptNode` uses `InvocationLayer` to query different LLMs under a unified API. In that design, users do not need to know which invocation layer is used for the model they select, as `PromptNode` takes responsibility of selecting it.
+
+Such invocation layers can be ported to 2.0 as standalone components. In this way we will have one component for each LLM backend that we support.
+
+Each component should be named after the class of models it supports, plus a `Generator` suffix. For example we should have `GPT4Generator`, `HuggingFaceLocalGenerator`, `CohereGenerator`, etc.
+
+Note that having separate components for each LLM makes it easy to deprecate them when we realize they are dropping out of favor or become severely outdated. It also makes it very easy for external contributors to make their own external components to support rarer LLMs, without having to add them to Haystack’s core.
+
+All these LLM clients will have a near-identical I/O:
+
+```python
+@component
+class ChatGPTGenerator:
+
+ @component.output_types(replies=List[List[str]])
+ def run(self, prompts: List[str], ... chatgpt specific params...):
+ ...
+ return {'replies': [...]}
+```
+
+Note how the component takes a list of prompts and LLM parameters only, but no variables nor templates, and returns only strings. This is because input rendering and output parsing are delegated to separate components, whose descriptions follow.
+
+Note: whether LLM components accept multiple prompts or a single one depends only on whether we want the LLM to support batching of prompts. Therefore it's an implementation decision that will be evaluated once we know the internals of the component. We strive to keep the interfaces as similar as possible to ease switching the various LLMs, but we won't force identical interfaces over them where it doesn't make sense with respect to their internal implementation.
+
+### Chat API
+
+Most LLMs support Chat interfaces, where they expect not a single prompt, but a list of messages in a format such as:
+
+```
+[
+ {"role": "system", "content": "\nYou are a helpful assistant speaking like a pirate. argh!"},
+ {"role": "user", "content": "What is the sun?"},
+]
+```
+
+In this proposal we're not taking this way of querying the LLMs into account, we will just focus on simple completion. We're going to address the Chat completion topic in a dedicated proposal.
+
+### Returning metadata
+
+In the example above we made the LLM return only a list of replies, as strings. However, in order to be able to parse the output into meaningful objects (see “Output parsing”) we may need additional metadata from these clients.
+
+1. Do we already have any such situation?
+2. Can we foresee any other?
+
+If the answer to any of the above is yes, a simple, maybe temporary solution would be to add a second output, called, for example, `'replies_meta'`. Any component that needs such meta to parse the output would then request this second output along with the first and zip the two lists together to reconstruct the original output of the LLM.
+
+### Returning streams
+
+Most LLMs like ChatGPT have the option to stream the tokens as they're generated. As we're already supporting this use case in version 1.x we want to support it in 2.x too.
+
+Currently we're supporting it by accepting a callback during `PromptNode` initialization that will be called every time a new chunk of the streamed response is received.
+
+In version 2.x we're going to do the same and have all generator components accept an optional callback during initialization.
+
+### How many clients we will have?
+
+Basing on the list of current invocation layers in Haystack 1.x, the list might look like:
+
+1. `ClaudeGenerator`
+2. `ChatGPTGenerator`, supporting GPT4 as well
+ - it may have an alias `GPT4Generator` to help users
+3. `CohereGenerator`
+4. `HuggingFaceInferenceGenerator`
+5. `HuggingFaceLocalGenerator`
+6. `GPT3Generator`
+7. `SagemakerGenerator`
+
+Plus one more for any other inference hosting/library that may appear in the future.
+
+## Prompt Builder
+
+In Haystack 1.x, prompts fetching and rendering is carried out by `PromptTemplate`. In 2.0, we rather make a separate `PromptBuilder` component to handle this process.
+
+The goal of `PromptBuilder` is to transform prompt templates, which are strings with variables (for example f-strings, Jinja-like templates), and fill in those variables with values that come from other components in the pipeline or from the pipeline inputs. The output of this component is one (or more) prompts, where a prompt is a string that the LLM can directly use.
+
+`PromptBuilder` is not tokenizer aware: the prompt will be checked for length by the LLM component before inference. If the need arises, we may later extend the component.
+
+Draft I/O for `PromptBuilder`:
+
+```python
+@component
+class PromptBuilder:
+
+ def __init__(self, template: Union[str, Path]):
+ self.template = # Download the template
+ template_variables = # extracts the variables from the template text
+ component.set_input_parameters(**{var: Any for var in template_variables})
+
+ @component.output_types(prompts=List[str])
+ def run(self, **kwargs):
+ # Render the template using the variables
+ return {"prompts": prompts}
+```
+
+### Template variables
+
+Due to the dynamic nature of prompt templates, the `PromptBuilder.run()` method takes `kwargs`, which contains all the variables that will be filled in the template. However, for this component to work with Canals, we need to know in advance which values this dict will contain: therefore, we need the users to specify in the `__init__` of the component the template to use.
+
+Such template names **cannot be changed at runtime**.
+
+The design above derives from one Canals limitation: a component’s sockets all need to be known at the latest by `__init__` time, in order for the connections to be made and validated. Therefore, we need to know all the prompt variables before building the pipelines, because the prompt variables are inputs of the `run()` method.
+
+However, earlier iterations of Canals did support so-called “true variadic” components: components that do not need to know what they will be connected to, and build the input sockets at need. Such components of course lack input validation, but enable usecases like the above.
+
+If we decide that Canals should support such components again, we would be able to rewrite `PromptBuilder` to take a prompt as its input parameter and just accept any other incoming input, on the assumption that users know what they’re doing.
+
+For example:
+
+```python
+@component
+class PromptBuilder:
+
+ @variadic_input
+ @component.output_types(prompts=List[str])
+ def run(self, template: Union[str, Path], **kwargs):
+ # ... loads the template ...
+ # ... render the prompts ...
+ return {"prompts": prompts}
+```
+
+### Why a separate `PromptBuilder` component at all?
+
+`PromptNode` used to take the prompt template and the variables to render it directly, and then forward the result to the LLM.
+
+The key advantage of `PromptBuilder` is the ability to use any tool from the ever-growing list of LLM prompting template libs. If someone has invested a lot in guidance and considers using a framework like Haystack or LangChain this will be one of the biggest selling points: the ability to use `PromptBuilder` for a specific prompt tooling lib.
+
+On top of that, it also allows users to skip the template rendering step altogether and send prompts directly to the LLM, which may be beneficial in some context (for example, if users just want to chat with the LLM without RAG).
+
+The drawback is that `PromptBuilder` is an additional component, so we must evaluate if this additional flexibility is worth the additional complexity. However, at this point in time we are convinced that the benefits are worth the cost.
+
+## Output parsing
+
+LLMs clients output strings, but many components expect other object types, and LLMs may produce output in a parsable format that can be directly converted into objects. Output parsers transform these strings into objects of the user’s choosing.
+
+In Haystack 1.x, this task was assigned to the subclasses of `BaseOutputParser`. In 2.0 we’re going to have a very similar situation, with the difference that such classes are components.
+
+The most straightforward component in this category is `RepliesToAnswersConverter`. It takes the string replies of an LLM and produces `Answer` objects. One additional output parser could be `RepliesToAnswersWithReferencesConverter`, which also connects answers to the documents used to produce them. As the need for additional output parsers arises, we will progressively add more.
+
+Draft I/O for `RepliesToAnswersConverter` (note: this may end up being almost the entire component’s implementation):
+
+```python
+@component
+class RepliesToAnswersConverter:
+
+ @component.output_types(answers=List[List[Answer]])
+ def run(self, replies: List[List[str]]):
+        return {"answers": [[Answer(answer=reply) for reply in reply_list] for reply_list in replies]}
+```
+
+# Drawbacks
+
+Possible drawbacks of this design:
+
+1. Users now need to use three components instead of a single, large one.
+2. We lose the capability to change the prompt for the LLM at runtime.
+
+# Alternatives
+
+1. Porting the existing `PromptNode` to Haystack 2.0: would be a massive effort and make the new version inherit some design decisions that, with time, proved unnecessary and/or clumsy to use, like the “hiding” of the invocation layer that makes it quite hard for external contributors to add support for other LLMs to `PromptNode`, or its imperfect layer selection algorithm.
+
+# Adoption strategy
+
+Follows the same strategy outlined in all other proposals related to the Haystack 2.0 migration.
+
+# How we teach this
+
+We need brand new tutorials and examples of pipelines using these components.
diff --git a/testbed/deepset-ai__haystack/proposals/text/5738-document-2.0.md b/testbed/deepset-ai__haystack/proposals/text/5738-document-2.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..43a1ebf2f57466ded64f8a75fd0b48562644d424
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5738-document-2.0.md
@@ -0,0 +1,108 @@
+- Title: Document class for Haystack 2.0
+- Decision driver: ZanSara
+- Start Date: 2023-09-07
+- Proposal PR: 5738
+
+# Summary
+
+With Haystack 2.0 we want to provide a lot more flexibility to Pipelines and Components. In a lot of situations,
+we found that the Document class inherited from Haystack 1.x was not up to the task: therefore we chose to expand its
+API to work best in this new paradigm.
+
+# Basic example
+
+Documents 2.0 have two fundamental differences with Documents 1.x:
+
+- They have more than one content field. Documents 1.x only have a `content: Any` field that needs to match with the
+ `content_type` field in meaning. Documents 2.0 instead support `text`, `array`, `dataframe` and `blob`, each typed
+ correctly.
+
+- The `content_type` field is gone: In Haystack 1.x we used the `content_type` field to interpret the data contained
+ in the `content` field: with the new design, this won't be necessary any longer. Haystack 2.0, however, has
+ a `mime_type` field that helps interpret the content of the `blob` field if necessary.
+
+# Motivation
+
+During the development of Haystack 2.0 components, we often found ourselves held back by the design limitations of
+the Document class. Unlike in Haystack 1.x, Documents now carry more information across the pipeline: for example,
+they might contain the file they originated from, they might support more datatypes, etc.
+
+Therefore we decided to extend the Document class to support a wider array of data.
+
+# Detailed design
+
+The design of this class was inspired by the [DocArray API](https://docarray.jina.ai/fundamentals/document/).
+
+Here is the high-level API of the new Document class:
+
+```python
+@dataclass(frozen=True)
+class Document:
+ id: str = field(default_factory=str)
+ text: Optional[str] = field(default=None)
+ array: Optional[numpy.ndarray] = field(default=None)
+ dataframe: Optional[pandas.DataFrame] = field(default=None)
+ blob: Optional[bytes] = field(default=None)
+ mime_type: str = field(default="text/plain")
+ metadata: Dict[str, Any] = field(default_factory=dict, hash=False)
+ id_hash_keys: List[str] = field(default_factory=lambda: ["text", "array", "dataframe", "blob"], hash=False)
+ score: Optional[float] = field(default=None, compare=True)
+ embedding: Optional[numpy.ndarray] = field(default=None, repr=False)
+
+ def to_dict(self):
+ """
+ Saves the Document into a dictionary.
+ """
+
+ def to_json(self, json_encoder: Optional[Type[DocumentEncoder]] = None, **json_kwargs):
+ """
+ Saves the Document into a JSON string that can be later loaded back. Drops all binary data from the blob field.
+ """
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ """
+ Creates a new Document object from a dictionary of its fields.
+ """
+
+ @classmethod
+ def from_json(cls, data, json_decoder: Optional[Type[DocumentDecoder]] = None, **json_kwargs):
+ """
+ Creates a new Document object from a JSON string.
+ """
+
+ def flatten(self) -> Dict[str, Any]:
+ """
+ Returns a dictionary with all the document fields and metadata on the same level.
+ Helpful for filtering in document stores.
+ """
+
+```
+
+As you can notice, the main difference is the management of the content fields: we now have:
+
+- `text`: for text data
+- `array`: for array-like data, for example images, audio, video
+- `dataframe`: for tabular data
+- `blob`: for binary data.
+
+In order to help interpret the content of these fields, there's a `mime_type` field that components can use to figure out
+how to use the content fields they need.
+
+There is additional information that we may want to add, for example `path`. For now such info can be
+kept into the metadata: if we realize we access it extremely often while processing Documents we should consider
+bringing those fields out of `metadata` as top-level properties of the dataclass.
+
+
+# Drawbacks
+
+As the Document class becomes a bit more complex, components need to be adapted to it. This may cause some issues
+to DocumentStores, because now they not only need to be able to store text but binary blobs as well.
+
+We can imagine that some very simple DocumentStore will refuse to store the binary blobs. Fully-featured,
+production-ready document stores instead should be able to find a way to store such blobs.
+
+
+# Unresolved questions
+
+Are the 4 content fields appropriate? Are there other content types we can consider adding?
diff --git a/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.md b/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.md
new file mode 100644
index 0000000000000000000000000000000000000000..9093a19e0a6e0bc59a6e448a0bcf2eb4868dc6f2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.md
@@ -0,0 +1,227 @@
+- Title: Evaluation in Haystack 2.0
+- Decision driver: (Silvano Cerza, Julian Risch)
+- Start Date: 2023-08-23
+- Proposal PR: [#5794](https://github.com/deepset-ai/haystack/pull/5794/)
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/5628
+
+# Summary
+
+Given the below requirements we redefine how evaluation and metrics are handled in Haystack 2.x.
+
+Our goal is to lower the barrier of entry for newcomers while also making it more flexible and extensible for more advanced and expert users.
+All this while making it more modular and easier to test and maintain.
+
+The requirements are:
+
+- compare the performance of different pipelines on level of pipeline outputs (user perspective, integrated eval)
+ - while running the full pipeline we can store intermediate results and calculate metrics for each component that returns answers or documents
+- find out which component is the performance bottleneck in one pipeline by evaluating subpipelines (isolated evaluation)
+- as above, get evaluation metrics for every component in a pipeline that returns answer or documents (ranker, retriever, reader, PromptNode)
+- compare the performance of two components, for example two Readers, without the need to create a full retriever-reader pipeline
+- export evaluation results to a file (similar to Haystack 1.x but faster) and evaluation report
+- choose evaluation metrics from a list of metrics (e.g. F1, BLEU, ROUGE, Semantic Answer Similarity) based on the output type of a component
+- evaluate pipelines that return ExtractiveAnswers
+- evaluate pipelines that return GenerativeAnswers
+- evaluate hallucinations (check generated answers are backed up by retrieved documents)
+- evaluate pipelines with PromptNodes and arbitrary PromptTemplates (for example with Semantic Answer Similarity or BLEU, ROUGE (metrics from machine translation and summarization) if I provide labels)
+- load evaluation data for example from BEIR
+
+# Basic example
+
+```python
+pipe = Pipeline()
+...
+inputs = [{"component_name": {"query": "some question"}, ...}, ...]
+expected_output = [{"another_component_name": {"answer": "42"}, ...}, ...]
+result = eval(pipe, inputs=inputs, expected_output=expected_output)
+metrics = result.calculate_metrics(Metric.SAS)
+metrics.save("path/to/file.csv")
+```
+
+A more thorough example of a real use case can be found in the `5794-evaluation-haystack-2.py` file. It creates a small RAG Pipeline and shows how one would run evaluation on it.
+
+# Motivation
+
+Since the new version 2.x of Haystack is going toward a completely different approach to run `Pipeline`s and `component`s we also need to rework how we evaluate them.
+
+The current implementation in version 1.x is convoluted and mixes evaluation and metrics at different steps during the process. This makes it harder to reason about it and maintain. This is noticeable also by the fact that only a limited amount of nodes can be evaluated. Also it's currently not easy to calculate custom metrics.
+
+The goal of this new approach is to make it easier for users to evaluate and calculate metrics for their `Pipeline`s and `component`s. Evaluation is not an easy task to reason about and understand. Making it simpler will also make it less intimidating to less experienced users, pushing more people to approach this important part of Haystack.
+
+It must be flexible by making it possible to evaluate any type of `component`. This is also a hard requirement since we want to make it easier for users to define new `component`s. Limiting the types of `component`s that can be evaluated would also limit the usage of custom or novel ones, thus limiting user creativity.
+
+Extensibility is part of the goal too. We want to make it possible for users to calculate some of the most common metrics, but also use custom logic to calculate any other one. As of now it's not possible to calculate custom metrics without knowing the nitty gritty of the Haystack internals.
+
+All these goals merge also into another goal, maintainability. By making evaluation and metrics calculation more modular and easy to use we're also going to make it more maintainable and testable.
+
+# Detailed design
+
+### The `eval` function
+
+We'll implement an `eval` function that will be able to evaluate all `Pipeline`s and `Component`s.
+A minimal implementation could look like this:
+
+```python
+def eval(runnable: Union[Pipeline, Component], inputs: List[Dict[str, Any]], expected_outputs: List[Dict[str, Any]]) -> EvaluationResult:
+ outputs = []
+ for input_ in inputs:
+ output = runnable.run(input_)
+ outputs.append(output)
+ return EvaluationResult(runnable, inputs, outputs, expected_outputs)
+```
+
+This is obviously an overly simplistic example but the core concept remains.
+`inputs` must be a list of data that will be passed to either the `Pipeline` or the `Component`.
+`expected_outputs` could be a list with the same length as `inputs` or an empty list for blind evaluation.
+
+Blind in this context means running an evaluation without providing a list of expected output. This could be done for several reasons, like if we don't know what to expect as output, or to compare output of different components.
+
+`EvaluationResult` could either be a `Dict` or its own class, this is open to discussion. Either way it must be easy to save to disk. When saving the results to disk we can also include the `Pipeline` or `Component` in a serialized form.
+
+When evaluating a `Pipeline` we could also override its private `_run_component` function to evaluate every node it will run. This will 100% work for our implementation of `Pipeline`. If a user tries to evaluate a `Pipeline` that reimplements its own `run` method it might not be able to evaluate each `Component`. I believe this is a worthwhile, if risky, tradeoff.
+
+Overriding `_run_component` would also give us the chance to simulate optimal component outputs. `eval` could also accept an optional `simulated_output` dictionary containing the outputs of one or more `Component` that are in the `Pipeline`. It would look similar to this:
+
+```python
+simulated_output = {
+ "component_name": {"answer": "120"},
+ "another_component_name": {"metadata": {"id": 1}}
+}
+```
+
+Another alternative would be to use the `expected_output` also to simulate outputs of intermediate components.
+
+#### Tracking progress
+
+To track progress we can also go in another direction. We could return partial results while iterating over our inputs:
+
+```
+def eval(runnable, inputs, expected_outputs):
+ result = EvaluationResult(runnable, inputs, {}, expected_outputs)
+ for input_ in inputs:
+ output = runnable.run(input_)
+ result.append_output(output)
+ yield result
+```
+
+Or return some progress percentage and only in the end the final result.
+
+```
+def eval(runnable, inputs, expected_outputs):
+ outputs = []
+ total = len(inputs)
+ for i, input_ in enumerate(inputs):
+ output = runnable.run(input_)
+ outputs.append(output)
+ yield 100 * (i / total), None
+
+ yield 100, EvaluationResult(runnable, inputs, outputs, expected_outputs)
+```
+
+### `EvaluationResult`
+
+`EvaluationResult` won't contain partial metrics anymore as it does in v1.x but it will keep track of all the information used by `eval()`.
+This way we'll be able to save all the necessary information to a single file. That information will probably be:
+
+- Serialized Pipeline or Component
+- Inputs
+- Expected outputs
+- Actual outputs
+
+This data should be serializable to string so that it can be saved to file and loaded back whenever necessary.
+We shouldn't expect all input and output data to implement serialization methods like `to_dict` and `from_dict` like `Pipeline` and `component`s do, so we probably should find an alternative to handle serialization of types that don't. An unsafe option would be the use of `pickle`, but that's dangerous as it could lead to malicious code being executed.
+
+Given the above information we should be able to implement a single method to calculate predetermined metrics or even custom ones.
+Known metrics could be defined as an enum to ease discoverability and documentation.
+
+```python
+class Metric(Enum):
+ RECALL = "Recall"
+ MRR = "Mean Reciprocal Rank"
+ MAP = "Mean Average Precision"
+ EM = "Exact Match"
+ F1 = "F1"
+ SAS = "SemanticAnswerSimilarity"
+```
+
+The method to calculate metrics could look similar to this:
+
+```python
+MetricsResult = Dict[str, Dict[str, float]]
+MetricCalculator = Callable[..., MetricsResult]
+
+def calculate_metrics(self: EvaluationResult, metric: Union[Metric, MetricCalculator], **kwargs) -> MetricsResult:
+ # Verify if we're calculating a known metric
+ if metric == Metric.RECALL:
+ return self._calculate_recall(**kwargs)
+ elif metric == Metric.MRR:
+ return self._calculate_mrr(**kwargs)
+ # Other metrics...
+
+ # If it's not a known metric it must be a custom one
+ return metric(self, **kwargs)
+```
+
+This gives the users the flexibility to easily calculate metrics that we support but also use custom logic to calculate any kind of metric given the available data.
+Since users will need to save their calculated metrics to file we could create a simple `MetricResult` class that simply wraps the generated metrics dictionary, something similar:
+
+```python
+class MetricResult(dict):
+ def save(self, file: Union[str, Path]):
+ # Dump info to file here
+```
+
+### Ease input specification
+
+When declaring the input of a `Pipeline` we must specify both a `component` name and its input names. This can be annoying so we could simplify this by making certain assumptions.
+
+An approach to this problem could be letting the user skip the `component` name specification when declaring the `Pipeline` input. This could work up until the point the user changes their `Pipeline` to have multiple inputs with the same name.
+
+So given a `Pipeline` that has single input `component` name `foo` that takes a `query` as its input we can let the user specify the `eval` input like so:
+
+```python
+eval(pipe, {"query": "This is the query"})
+```
+
+If the user adds a new `component` name `bar` that also takes a `query` as input we'll make evaluation fail preventively since we cannot be sure whether both `component`s must take `query` as input and force explicit specification:
+
+```python
+eval(pipe, {"foo": {"query": "This is the query"}, "bar": {"query": "This is the query"}})
+```
+
+# Drawbacks
+
+The major drawback found from the feedback gathered is always the same, and it's also common to `Pipeline.run()`. It's frustrating and annoying having to update the evaluation input and/or output data if I make changes to my `Pipeline`.
+
+Given that new `Pipeline`s can have multiple inputs to different `component`s we must specify which component will take which input. As an example, given a `Pipeline` with two input components called `foo` and `bar` that take an `input_query` value, we'll have to specify the input as follows:
+
+```python
+input = {
+ "foo": {"input_query": "This my input query"},
+ "bar": {"input_query": "This my input query"}
+}
+```
+
+I believe this is a worthwhile tradeoff as it gives a huge amount of customization that wasn't possible in Haystack 1.x.
+
+We could ease inputs specification in some cases as specified in the above section but that would make evaluation prone to errors. I believe that to be a dangerous approach as it could mean making evaluation "easier to use" at the cost of making it also more error prone if not done properly and with the correct safe guards.
+
+The same thing can be said for the `Pipeline` output. There will be multiple outputs most of the time since we're going to evaluate individual nodes and the whole `Pipeline` with a single evaluation run. So the user must specify from which `component` the output should be expected.
+
+Expected output specification suffers from the same issue of inputs specification. So making assumption to try and match expected output with the actual `Pipeline` output is still error prone but probably can be handled for really simple `Pipeline`s with only a single output `component`.
+
+Evaluation should be an advanced topic for users that know what they're actually doing. This might seem contradictory to what has been said above regarding approachability of the feature, but I believe this to be an easy-to-use but hard-to-master feature.
+
+# Adoption strategy
+
+This is obviously a breaking change as it's meant for Haystack 2.x.
+
+# How we teach this
+
+Much like the current situation we'll write tutorials, examples and documentation to go along with this new feature. We can also leverage future ready-made `Pipeline`s to show how to evaluate them.
+
+We're also going to have new community spotlights in Discord to show how to approach evaluation in Haystack 2.x.
+
+# Unresolved questions
+
+Evaluation of pipelines containing Agents or other loops is out of scope for this proposal (except for integrated pipeline evaluation).
diff --git a/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.py b/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e0661d0cb8cd1d934569dfa6bd0688d20f3b0e7
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/5794-evaluation-haystack-2.py
@@ -0,0 +1,141 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+# mypy: ignore-errors
+# pylint: skip-file
+###
+### This is an example script of how to use the eval function to evaluate a RAG Pipeline.
+### For more information see the relative proposal.
+###
+
+import os
+
+from haystack import Pipeline
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.generators.openai.gpt35 import GPT35Generator
+from haystack.components.retrievers.memory import MemoryBM25Retriever
+from haystack.dataclasses.document import Document
+from haystack.document_stores.memory import MemoryDocumentStore
+
+docstore = MemoryDocumentStore()
+
+# Write some fake documents
+docstore.write_documents(
+ [
+ Document(content="This is not the answer you are looking for.", meta={"name": "Obi-Wan Kenobi"}),
+ Document(content="This is the way.", meta={"name": "Mandalorian"}),
+ Document(content="The answer to life, the universe and everything is 42.", meta={"name": "Deep Thought"}),
+ Document(content="When you play the game of thrones, you win or you die.", meta={"name": "Cersei Lannister"}),
+ Document(content="Winter is coming.", meta={"name": "Ned Stark"}),
+ ]
+)
+
+# Create our retriever, we set top_k to 3 to get only the best 3 documents otherwise by default we get 10
+retriever = MemoryBM25Retriever(document_store=docstore, top_k=3)
+
+# Create our prompt template
+template = """Given the context please answer the question.
+Context:
+{# We're receiving a list of lists, so we handle it like this #}
+{% for list in documents %}
+ {% for doc in list %}
+ {{- doc -}};
+ {% endfor %}
+{% endfor %}
+Question: {{ question }};
+Answer:
+"""
+prompt_builder = PromptBuilder(template)
+
+# We're using OpenAI gpt-3.5
+OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
+generator = GPT35Generator(api_key=OPENAI_API_KEY)
+
+# Build the pipeline
+pipe = Pipeline()
+
+pipe.add_component("docs_retriever", retriever)
+pipe.add_component("builder", prompt_builder)
+pipe.add_component("gpt35", generator)
+
+pipe.connect("docs_retriever.documents", "builder.documents")
+pipe.connect("builder.prompt", "gpt35.prompt")
+
+# Run the pipeline
+query = "What is the answer to life, the universe and everything?"
+result = pipe.run({"docs_retriever": {"queries": [query]}, "builder": {"question": query}})
+
+print(result["gpt35"]["replies"])
+
+
+# These are the input that will be passed to the Pipeline when running eval, much like we've done a couple of lines above
+inputs = [
+ {"docs_retriever": {"queries": ["What is the answer?"]}, "builder": {"question": "What is the answer?"}},
+ {
+ "docs_retriever": {"queries": ["Take a deep breath and think. What is the answer?"]},
+ "builder": {"question": "Take a deep breath and think. What is the answer?"},
+ },
+ {
+ "docs_retriever": {"queries": ["What is the answer to life, the universe and everything?"]},
+ "builder": {"question": "What is the answer to life, the universe and everything?"},
+ },
+]
+
+# These are the expected output that will be compared to the actual output of the Pipeline.
+# We have a dictionary for each input so that len(inputs) == len(expected_output).
+# This gives the possibility to have different expected output for each different input.
+# NOTE: I omitted the gpt35 metadata output because it's too long.
+expected_output = [
+ {
+ # This is the output that we expect from the docs_retriever component
+ "docs_retriever": {
+ "documents": [
+ [
+ Document(
+ content="The answer to life, the universe and everything is 42.", meta={"name": "Deep Thought"}
+ ),
+ Document(content="This is not the answer you are looking for.", meta={"name": "Obi-Wan Kenobi"}),
+ Document(content="This is the way.", meta={"name": "Mandalorian"}),
+ ]
+ ]
+ },
+ # This is the output that we expect from the builder component
+ "builder": {"prompt": "I should write the actual template here but I'm lazy so I won't."},
+ # This is the output that we expect from the gpt35 component
+ "gpt35": {"replies": ["The answer to life, the universe and everything is 42."], "metadata": {}},
+ },
+ {
+ "docs_retriever": {
+ "documents": [
+ [
+ Document(
+ content="The answer to life, the universe and everything is 42.", meta={"name": "Deep Thought"}
+ ),
+ Document(content="This is not the answer you are looking for.", meta={"name": "Obi-Wan Kenobi"}),
+ Document(content="This is the way.", meta={"name": "Mandalorian"}),
+ ]
+ ]
+ },
+ "builder": {"prompt": "I should write the actual template here but I'm lazy so I won't."},
+ "gpt35": {"replies": ["The answer to life, the universe and everything is 42."], "metadata": {}},
+ },
+ {
+ "docs_retriever": {
+ "documents": [
+ [
+ Document(
+ content="The answer to life, the universe and everything is 42.", meta={"name": "Deep Thought"}
+ ),
+ Document(content="This is not the answer you are looking for.", meta={"name": "Obi-Wan Kenobi"}),
+ Document(content="This is the way.", meta={"name": "Mandalorian"}),
+ ]
+ ]
+ },
+ "builder": {"prompt": "I should write the actual template here but I'm lazy so I won't."},
+ "gpt35": {"replies": ["The answer to life, the universe and everything is 42."], "metadata": {}},
+ },
+]
+
+eval_result = eval(pipe, inputs=inputs, expected_output=expected_output)
+metrics = eval_result.calculate_metrics(Metric.SAS) # noqa
+metrics.save("path/to/file.csv")
diff --git a/testbed/deepset-ai__haystack/proposals/text/6001-document-store-filter-rework.md b/testbed/deepset-ai__haystack/proposals/text/6001-document-store-filter-rework.md
new file mode 100644
index 0000000000000000000000000000000000000000..64e7ab9bfb605b3df7db6f6ba3052e693bbf0a9d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/6001-document-store-filter-rework.md
@@ -0,0 +1,183 @@
+- Title: Document Stores filter specification for Haystack 2.x
+- Decision driver: Silvano Cerza
+- Start Date: 2023-10-08
+- Proposal PR: [#6001](https://github.com/deepset-ai/haystack/pull/6001)
+
+# Summary
+
+This proposal introduces a new fully detailed and extensible specification for filtering Document Stores in Haystack 2.x.
+This comes from my personal experience and struggles trying to implement filters conversion for `ElasticsearchDocumentStore`.
+
+# Basic example
+
+```
+{
+ "conditions": [
+ { "field": "age", "operator": ">=", "value": 18 },
+ {
+ "operator": "OR",
+ "conditions": [
+ { "field": "country", "operator": "==", "value": "USA" },
+ { "field": "country", "operator": "==", "value": "Canada" }
+ ]
+ }
+ ]
+}
+```
+
+The above snippet would be equivalent to the following Python code:
+
+```python
+age >= 18 and (country == "USA" or country == "Canada")
+```
+
+# Motivation
+
+Filtering in Haystack 1.x has no clear, detailed specification; we only have a [high-level overview][filters-high-level-doc] in the documentation that also mixes REST APIs documentation in. It's also inherited from MongoDB and is a subset of the Mongo Query Language.
+
+Converting from the current filters to another query language is quite hard as there are tons of corner cases. Handling nested filters is usually really error prone as the operators can be keys; this requires tons of nested and/or recursive logic to figure out whether the current key is a field or an operator. There's also quite some backtracking involved as the field could be two or three levels above its comparison operator and/or value.
+
+As a practical example the below two filters are equivalent, even though they're structured differently and `$and` is implicit in the second one:
+
+```
+{"number": {"$and": [{"$lte": 2}, {"$gte": 0}]}}
+
+{"number": {"$lte": 2, "$gte": 0}}
+```
+
+With the newly proposed approach both filters would be equivalent to:
+
+```
+{
+ "operator": "AND",
+ "conditions": [
+ { "field": "number", "operator": "<=", "value": 2 },
+ { "field": "number", "operator": ">=", "value": 0 },
+ ]
+}
+```
+
+As you can see all the required information is on the same level and clearly recognisable. This makes it much easier both to read by a human and convert with code.
+
+In Python code:
+
+```python
+number <= 2 and number >= 0
+```
+
+# Detailed design
+
+Filters top level must be a dictionary.
+
+There are two types of dictionaries:
+
+- Comparison
+- Logic
+
+Top level can be either be a Comparison or Logic dictionary.
+
+Comparison dictionaries must contain the keys:
+
+- `field`
+- `operator`
+- `value`
+
+Logic dictionaries must contain the keys:
+
+- `operator`
+- `conditions`
+
+`conditions` key must be a list of dictionaries, either Comparison or Logic.
+
+`operator` values in Comparison dictionaries must be:
+
+- `==`
+- `!=`
+- `>`
+- `>=`
+- `<`
+- `<=`
+- `in`
+- `not in`
+
+`operator` values in Logic dictionaries must be:
+
+- `NOT`
+- `OR`
+- `AND`
+
+---
+
+As an example this:
+
+```
+{
+ "$and": {
+ "type": {"$eq": "article"},
+ "$or": {"genre": {"$in": ["economy", "politics"]}, "publisher": {"$eq": "nytimes"}},
+ "date": {"$gte": "2015-01-01", "$lt": "2021-01-01"},
+ "rating": {"$gte": 3},
+ }
+}
+```
+
+would convert to:
+
+```
+{
+ "operator": "AND",
+ "conditions": [
+ { "field": "type", "operator": "==", "value": "article" },
+ {
+ "operator": "OR",
+ "conditions": [
+ { "field": "genre", "operator": "in", "value": ["economy", "politics"] },
+ { "field": "publisher", "operator": "==", "value": "nytimes" },
+ ]
+ },
+ { "field": "date", "operator": ">=", "value": "2015-01-01" },
+ { "field": "date", "operator": "<", "value": "2021-01-01" },
+ { "field": "rating", "operator": ">=", "value": 3 },
+ ]
+}
+```
+
+In Python code:
+
+```python
+type == "article" and (
+ genre in ["economy", "politics"] or publisher == "nytimes"
+) and date >= "2015-01-01" and date < "2021-01-01" and rating >= 3
+```
+
+Dates have been kept as strings but ideally in the new implementation they would be converted to `datetime` instances so the Document Store will be able to convert it to whatever format it needs to actually compare them. As different Document Stores might have different ways of storing the same value it's important that they handle the conversion from Python type to stored type.
+
+Another thing that in my opinion should be changed is that metadata fields must be explicitly specified for filtering. In the example above all `field`s would be prefixed with `metadata.` to get in return the expected `Document`s. e.g. `date` -> `metadata.date`
+This connects to `Document` implementation and is not the focus of this proposal, but it should be taken into account.
+
+# Drawbacks
+
+The only drawback would be that we need to adapt the existing Document Stores already created for Haystack 2.x to support this filtering system. `MemoryDocumentStore`, `ElasticsearchDocumentStore`, `ChromaDocumentStore` and `MarqoDocumentStore` are the currently existing Document Stores.
+
+# Alternatives
+
+An alternative would be keeping the current strategy of declaring filters.
+This wouldn't require any change but supporting a new filtering language after the release of Haystack 2.x would be more difficult than doing it now.
+
+# Adoption strategy
+
+We're going to release this new strategy of filters declaration for Haystack 2.x. At the same time we'll deprecate the current strategy but we'll keep supporting it for a while.
+
+Since we're going to provide a utility function to convert from old style to new style it will be easy for Document Stores to support both.
+
+# How we teach this
+
+We're going to provide documentation and specifications on how the filters should be declared, this proposal is a good starting point as it already defines the specs.
+
+We're also going to provide utility functions to migrate filters from old style to new style.
+
+# Unresolved questions
+
+This is the full design and there are no unresolved questions.
+
+[filters-high-level-doc]: https://docs.haystack.deepset.ai/docs/metadata-filtering
diff --git a/testbed/deepset-ai__haystack/proposals/text/6141-meta-field-ranker.md b/testbed/deepset-ai__haystack/proposals/text/6141-meta-field-ranker.md
new file mode 100644
index 0000000000000000000000000000000000000000..ca177366055107d8b7307f7a1455d9d088fac4ee
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/6141-meta-field-ranker.md
@@ -0,0 +1,68 @@
+- Title: Meta Field Ranker
+- Decision driver: @domenicocinque
+- Start Date: 2023-10-20
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/6141
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/6054
+
+# Summary
+
+This ranker should allow sorting the documents based on a meta field of choice.
+
+# Basic example
+
+The ranker could be instantiated as follows:
+
+``` python
+ranker = MetaFieldRanker(
+ meta_field="rating",
+ weight=0.5,
+ ascending=False,
+ top_k=3,
+)
+```
+Consider a simple pipeline with a retriever and a MetaFieldRanker in which
+the documents are provided with a meta field "rating". The documents are first retrieved by the retriever and
+then sorted by the MetaFieldRanker.
+
+``` python
+pipeline = Pipeline()
+pipeline.add_component(component=InMemoryBM25Retriever(document_store=document_store, top_k=20)
+, name="Retriever")
+pipeline.add_component(component=MetaFieldRanker(meta_field="rating"), name="Ranker")
+pipeline.connect("Retriever.documents", "Ranker.documents")
+```
+
+# Motivation
+
+I found the need for this feature while working on a system that retrieves books based on their description and the
+similarity to the query. After retrieving the documents it makes sense to expose them to the user in order of popularity.
+This is just one example of a use case for this feature, but I think it could be useful in many other contexts.
+
+# Detailed design
+
+The actual implementation of the ranker is very similar to the already present RecentnessRanker. The main difference
+is to remove the date parsing logic.
+
+# Drawbacks
+
+The main drawback is that it would be very similar to the already present RecentnessRanker. However, this could be
+solved by making the RecentnessRanker a subclass of the MetaFieldRanker and adding the date parsing logic to it.
+Apart from that, it is a very simple component that should not have any other drawbacks.
+
+# Alternatives
+
+The alternative is to make the user implement their own ranking logic.
+
+# Adoption strategy
+
+MetaFieldRanker is a Haystack 2.0 component. As it is not a breaking change, it should be easy to adopt in combination with the other components.
+
+# How we teach this
+
+It would be sufficient to integrate a small comparison with the RecentnessRanker in the documentation.
+
+
+# Unresolved questions
+
+The main issue is the implementation strategy. Especially if we want to include the `ranking_mode` parameter in the
+MetaFieldRanker, it would make sense to have the RecentnessRanker as a subclass of the MetaFieldRanker.
diff --git a/testbed/deepset-ai__haystack/proposals/text/6784-integrations-for-eval-framworks.md b/testbed/deepset-ai__haystack/proposals/text/6784-integrations-for-eval-framworks.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3b245f1062befe91ba514f199e057092c219961
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/6784-integrations-for-eval-framworks.md
@@ -0,0 +1,151 @@
+- Title: Integration of LLM Evaluation Frameworks
+- Decision driver: Madeesh Kannan, Julian Risch
+- Start Date: 2024-01-19
+- Proposal PR: https://github.com/deepset-ai/haystack/pull/6784
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/6672
+
+# Summary
+
+- Create integrations for three different LLM evaluation frameworks in https://github.com/deepset-ai/haystack-core-integrations
+- The LLM evaluation frameworks in question are: [Uptrain](https://docs.uptrain.ai/getting-started/introduction), [RAGAS](https://docs.ragas.io/en/stable/index.html) and [DeepEval](https://docs.confident-ai.com/docs/getting-started).
+- The integrations introduce the following components in this order: `UptrainEvaluator`, `RagasEvaluator`, `DeepEvalEvaluator`.
+- Users can run a Haystack pipeline and evaluate the result with model-based metrics implemented by the evaluation frameworks.
+- Calculation of metrics is done by the frameworks through running prompts with OpenAI (Uptrain) or using langchain to make the OpenAI call (RAGAS, DeepEval).
+
+# Basic example
+
+With the integration, users can use Haystack’s pipeline concept for the evaluation too. They need to provide the outputs of the RAG pipeline as inputs to the Evaluator component in an evaluation pipeline:
+
+```python
+p = Pipeline()
+p.add_component(instance=DeepEvalEvaluator(metric="Hallucination", params={"threshold": 0.3}), name="evaluator")
+# p.add_component(instance=RagasEvaluator()...
+
+questions = [...]
+contexts = [...]
+answers = [...]
+
+p.run({"evaluator": {"questions": questions, "context": contexts, "answer": answers}})
+# {"evaluator": DeepEvalResult(metric='hallucination', score=0.817)}
+```
+
+# Motivation
+
+Users of Haystack that deploy RAG pipelines currently do not have an avenue of evaluating the outputs of the same. Traditional methods of evaluation that involve the creation of labelled datasets are often out of reach for open-source practitioners due to resource- and time-constraints. Furthermore, this approach is not necessarily scalable and applicable to LLMs due to the generative property of RAG-based information retrieval.
+
+This has given rise to model-based evaluation approaches, i.e., the method of training language models to act as classifiers and scorers. A very popular implementation of this approach revolves around designing prompt-based natural language metrics that are used in conjunction with an instruction-trained LLM. The LLM then acts as the judge, evaluating the outputs based on the criteria defined in the metrics. This approach is eminently more scalable and low-friction for the end-user.
+
+Other LLM application frameworks such as LlamaIndex already provide support for this approach to evaluation, and it is in Haystack's interests to do the same. This will also help establish a baseline against which our future efforts in this area can be compared.
+
+# Detailed design
+
+As with evaluation in Haystack 1.x, we reaffirm the core idea of implementing different pipelines for different concerns. We consider evaluation a separate process and consequently separate the execution of RAG and the metric calculation into two different pipelines. This allows for greater flexibility - for instance, the evaluation pipeline could contain an additional component that routes the inputs to different evaluator components based on certain criteria, etc. Another example would be the ability to convert the inputs from/to different formats before passing them to the evaluator.
+
+A further advantage of this approach is that any tool we develop in the future to facilitate introspection and observability of pipelines can transparently be applied to evaluation as well.
+
+The implementation of the three evaluator components should follow the general guidelines for custom component development. There are two approaches we could take:
+
+- **Metric wrapping**: Make each metric an individual component.
+
+ Advantages:
+
+ - Inputs and outputs are explicitly defined for each metric, which makes the API more explicit.
+ - We can control which metrics to include.
+ - We can use the execution graph for control flow.
+
+ Disadvantages:
+
+ - Duplication between frameworks.
+ - Maintenance burden.
+ - Less freedom for the user.
+
+- **Framework wrapping**: Make each evaluation framework an individual component.
+
+ Advantages:
+
+ - Straightforward implementation.
+ - Easier to reason about.
+ - Low maintenance burden.
+ - More freedom for the user.
+
+ Disadvantages:
+
+ - API has to accommodate all possible inputs for all supported metrics.
+ - Less flexibility.
+
+Given the above comparison, **we will be implementing the second approach, i.e., framework as the component**. The disadvantages mentioned above can be mitigated by leaning into Haystack 2.x's I/O system.
+
+## API overview
+
+- We implement three components: `DeepEvalEvaluator`, `RagasEvaluator`, `UpTrainEvaluator`.
+- Their constructors take two parameters: `Literal["MetricName"]` and `Optional[Dict[str, Any]]` for the metric name and optional parameters for the same.
+ - We use JSON-serializable types here to ensure serde support.
+- The component initializes the internal framework-specific metric representation.
+- Depending on the metric, the component also calls `set_input_types` to register the appropriate input sockets for the given metric, e.g.: `question`, `contexts`, `answer` for groundedness, etc.
+ - This approach lets the user modulate the inputs individually, which wouldn't be possible if we use a generic representation for all metrics/frameworks.
+- Outputs can be implemented in one of two ways:
+
+ - `set_output_types` is also called to enumerate the outputs of the metric
+ - The output is a `dataclass` specific to the evaluation framework.
+
+ As opposed to their inputs, the outputs of metrics are not likely to require sophisticated routing for their usage further downstream. So, each evaluator will implement a dataclass that encapsulates the results of the evaluation.
+
+### Illustrative example
+
+```python
+from deepeval import BaseMetric, FaithfulnessMetric
+
+@component
+class DeepEvalEvaluator:
+ self._metric: BaseMetric
+
+ def __init__(self, metric: str, params: Optional[Dict[str, Any]]):
+        params = {} if params is None else params
+ if metric == "Faithfulness":
+ self._metric = FaithfulnessMetric(**params)
+            self.set_input_types(questions=List[str], answers=List[str], contexts=List[List[str]])
+ elif metric == "ContextRecall":
+ ...
+
+ def run(self, **kwargs):
+ # Logic to unwrap the inputs based on the metric and
+ # execute the backend code.
+ ...
+
+```
+
+# Drawbacks
+
+- **Lack of support for custom components**: The aforementioned API inherently restricts the user to the pre-defined metrics provided by the frameworks. On the other hand, we'd like to keep these integrations limited in their scope. Since we also plan on providing custom metric support in the future, this becomes a moot point.
+- **No support for batching across metrics**: Batching here refers to calculating multiple metrics on the inputs with a single API call. With the exception of UpTrain, the frameworks in question do not support this either. UpTrain's implementation is not publicly available, so we cannot determine if this type of batching happens on their server.
+- **Additional dependencies**: RAGAS and DeepEval depend on langchain, whereas UpTrain uses both its own client and OpenAI's.
+ - https://github.com/confident-ai/deepeval/blob/main/pyproject.toml
+ - https://github.com/explodinggradients/ragas/blob/main/pyproject.toml
+ - https://github.com/uptrain-ai/uptrain/blob/main/pyproject.toml
+
+# Alternatives
+
+- Eschewing the pipeline paradigm and using a separate evaluation API.
+ - Similar to how it was done in Haystack 1.x.
+- Implement them as components but run them individually.
+ - This would also be possible with the proposed API.
+
+# Adoption strategy
+
+This is a new feature with no breaking changes. Existing users can simply try out the evaluation with existing pipelines after installing additional dependencies and providing an OpenAI API key.
+
+# How we teach this
+
+We provide a new section in the documentation about evaluation. This proposal specifically deals with model-based evaluation, so it would be prudent to have a separate subsection for it.
+
+We should impress upon them the idea that evaluation is "just" another pipeline with different steps. A tutorial would also be helpful to guide them through the same. Apart from that, we should include pointers to the API docs of the different evaluation frameworks, etc.
+
+We clarify in the documentation how users can decide which Evaluation framework is best for them with a simple overview. They should not need to research the different frameworks before running their first evaluation.
+
+# Unresolved questions & future work
+
+- We see the integrated evaluation frameworks as a baseline. To what extent and when Haystack will have its own model-based metrics is out-of-scope for this proposal.
+- We envision an `Evaluator` component in Haystack's core with customizable model and prompt.
+- Terminology around existing `calculate_metrics`/`eval` functions and the `EvaluationResult` class need to be discussed (c.f https://github.com/deepset-ai/haystack/pull/6505).
+- Alternative take on the evaluation API - https://github.com/deepset-ai/haystack/pull/5794/
+  - The ideas proposed in the above should be compatible with those of this proposal.
diff --git a/testbed/deepset-ai__haystack/proposals/text/7462-rag-evaluation.md b/testbed/deepset-ai__haystack/proposals/text/7462-rag-evaluation.md
new file mode 100644
index 0000000000000000000000000000000000000000..21935ce88f457a2a52ec9676d1b9bbe20e4c5880
--- /dev/null
+++ b/testbed/deepset-ai__haystack/proposals/text/7462-rag-evaluation.md
@@ -0,0 +1,223 @@
+- Title: Proposal for presentation of evaluation results
+- Decision driver: David S. Batista
+- Start Date: 2024-04-03
+- Proposal PR: #7462
+- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/7398
+
+# Summary
+
+Add a new component to Haystack allowing users to interact with the results of evaluating the performance of a RAG model.
+
+
+# Motivation
+
+RAG models are one of the most popular use cases for Haystack. We are adding support for evaluation metrics, but there is no way to present the results of the evaluation.
+
+
+# Detailed design
+
+The output results of an evaluation pipeline composed of `evaluator` components are passed to a `EvaluationResults`
+(this is a placeholder name) which stores them internally and acts as an interface to access and present the results.
+
+The examples below are just for illustrative purposes and are subject to change.
+
+Example of the data structure that the `EvaluationResults` class will receive for initialization:
+
+```python
+
+data = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7"],
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "answer": ["Paris", "Madrid"],
+ "predicted_answer": ["Paris", "Madrid"]
+ },
+ "metrics":
+ [
+ {"name": "reciprocal_rank", "scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "single_hit", "scores": [1, 1, 0, 1]},
+ {"name": "multi_hit", "scores": [0.706125, 0.454976, 0.445512, 0.250522]},
+ {"name": "context_relevance", "scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ {"name": "faithfulness", "scores": [0.135581, 0.695974, 0.749861, 0.041999]},
+ {"name": "semantic_answer_similarity", "scores": [0.971241, 0.159320, 0.019722, 1]}
+ ],
+}
+
+```
+
+The `EvaluationResults` class provides the following methods to different types of users:
+
+Basic users:
+- `individual_aggregate_score_report()`
+- `comparative_aggregate_score_report()`
+
+Intermediate users:
+- `individual_detailed_score_report()`
+- `comparative_detailed_score_report()`
+
+Advanced users:
+- `find_thresholds()`
+- `find_inputs_below_threshold()`
+
+
+### Methods description
+An evaluation report that provides a summary of the performance of the model across all queries, showing the
+aggregated scores for all available metrics.
+
+```python
+def individual_aggregate_score_report():
+```
+
+Example output
+
+```bash
+{'Reciprocal Rank': 0.448,
+ 'Single Hit': 0.5,
+ 'Multi Hit': 0.540,
+ 'Context Relevance': 0.537,
+ 'Faithfulness': 0.452,
+ 'Semantic Answer Similarity': 0.478
+ }
+ ```
+
+A detailed evaluation report that provides the scores of all available metrics for all queries or a subset of queries.
+
+```python
+def individual_detailed_score_report(queries: Union[List[str], str] = "all"):
+```
+
+Example output
+
+```bash
+| question | context | answer | predicted_answer | reciprocal_rank | single_hit | multi_hit | context_relevance | faithfulness | semantic_answer_similarity |
+|----------|---------|--------|------------------|-----------------|------------|-----------|-------------------|-------------|----------------------------|
+| What is the capital of France? | wiki_France | Paris | Paris | 0.378064 | 1 | 0.706125 | 0.805466 | 0.135581 | 0.971241 |
+| What is the capital of Spain? | wiki_Spain | Madrid | Madrid | 0.534964 | 1 | 0.454976 | 0.410251 | 0.695974 | 0.159320 |
+```
+
+### Comparative Evaluation Report
+
+A comparative summary that compares the performance of the model with another model based on the aggregated scores
+for all available metrics.
+
+```python
+def comparative_aggregate_score_report(self, other: "EvaluationResults"):
+```
+
+```bash
+{
+ "model_1": {
+ 'Reciprocal Rank': 0.448,
+ 'Single Hit': 0.5,
+ 'Multi Hit': 0.540,
+ 'Context Relevance': 0.537,
+ 'Faithfulness': 0.452,
+ 'Semantic Answer Similarity': 0.478
+ },
+ "model_2": {
+ 'Reciprocal Rank': 0.448,
+ 'Single Hit': 0.5,
+ 'Multi Hit': 0.540,
+ 'Context Relevance': 0.537,
+ 'Faithfulness': 0.452,
+ 'Semantic Answer Similarity': 0.478
+ }
+}
+
+```
+
+A detailed comparative summary that compares the performance of the model with another model based on the scores of all
+available metrics for all queries.
+
+
+```python
+def comparative_detailed_score_report(self, other: "EvaluationResults"):
+```
+
+```bash
+| question | context | answer | predicted_answer_model_1 | predicted_answer_model_2 | reciprocal_rank_model_1 | reciprocal_rank_model_2 | single_hit_model_1 | single_hit_model_2 | multi_hit_model_1 | multi_hit_model_2 | context_relevance_model_1 | context_relevance_model_2 | faithfulness_model_1 | faithfulness_model_2 | semantic_answer_similarity_model_1 | semantic_answer_similarity_model_2 |
+|----------|---------|--------|--------------------------|--------------------------|-------------------------|-------------------------|--------------------|--------------------|-------------------|-------------------|---------------------------|---------------------------|----------------------|----------------------|------------------------------------|------------------------------------|
+| What is the capital of France? | wiki_France | Paris | Paris | Paris | 0.378064 | 0.378064 | 1 | 1 | 0.706125 | 0.706125 | 0.805466 | 0.805466 | 0.135581 | 0.135581 | 0.971241 | 0.971241 |
+| What is the capital of Spain? | wiki_Spain | Madrid | Madrid | Madrid | 0.534964 | 0.534964 | 1 | 1 | 0.454976 | 0.454976 | 0.410251 | 0.410251 | 0.695974 | 0.695974 | 0.159320 | 0.159320 |
+```
+
+
+Have a method to find interesting scores thresholds, typically used for error analysis, for all metrics available.
+Some potentially interesting thresholds to find are: the 25th percentile, the 75th percentile, the mean, the median.
+
+```python
+def find_thresholds(self, metrics: List[str]) -> Dict[str, float]:
+```
+
+```bash
+data = {
+ "thresholds": ["25th percentile", "75th percentile", "median", "average"],
+ "reciprocal_rank": [0.378064, 0.534964, 0.216058, 0.778642],
+ "context_relevance": [0.805466, 0.410251, 0.750070, 0.361332],
+ "faithfulness": [0.135581, 0.695974, 0.749861, 0.041999],
+ "semantic_answer_similarity": [0.971241, 0.159320, 0.019722, 1],
+}
+```
+
+Then have another method that
+
+```python
+def find_inputs_below_threshold(self, metric: str, threshold: float):
+    """Get all the queries with a score below a certain threshold for a given metric"""
+```
+
+# Drawbacks
+
+- Having the output in a format table may not be flexible enough, and maybe too verbose for datasets with a large number of queries.
+- Maybe the option to export to a .csv file would be better than having the output in a table format.
+- Maybe a JSON format would be better with the option for advanced users to do further analysis and visualization.
+
+
+# Adoption strategy
+
+- Doesn't introduce any breaking change, it is a new feature that can be adopted by users as they see fit for their use cases.
+
+# How we teach this
+
+- A tutorial would be the best approach to teach users how to use this feature.
+- Adding a new entry to the documentation.
+
+# User stories
+
+### 1. I would like to get a single summary score for my RAG pipeline so I can compare several pipeline configurations.
+
+Run `individual_aggregate_score_report()` and get the following output:
+
+```bash
+{'Reciprocal Rank': 0.448,
+ 'Single Hit': 0.5,
+ 'Multi Hit': 0.540,
+ 'Context Relevance': 0.537,
+ 'Faithfulness': 0.452,
+ 'Semantic Answer Similarity': 0.478
+ }
+ ```
+
+### 2. I am not sure what evaluation metrics work best for my RAG pipeline, especially when using the more novel LLM-based ones
+
+Use `context relevance` or `faithfulness`
+
+### 3. My RAG pipeline has a low aggregate score, so I would like to see examples of specific inputs where the score was low to be able to diagnose what the issue could be.
+
+Let's say it's a low score in `reciprocal_rank` and one already has an idea of what "low" means for a query/question, then simply run:
+
+    find_inputs_below_threshold("reciprocal_rank", threshold)
+
+If the low score is in `reciprocal_rank` one can first get thresholds for this metric using:
+
+ `find_thresholds(["reciprocal_rank"])`
+
+this will give:
+
+- 25th percentile: (Q1) the value below which 25% of the data falls.
+- median percentile: (Q2) the value below which 50% of the data falls.
+- 75th percentile: (Q3) the value below which 75% of the data falls.
+
+this can help to decide what is considered a low score, and then get, for instance, queries with a score below
+the Q2 threshold using `find_inputs_below_threshold("reciprocal_rank", threshold)`
diff --git a/testbed/deepset-ai__haystack/pyproject.toml b/testbed/deepset-ai__haystack/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a12f7ddf4c5db0a86d3e5d880c705b3879a9a8af
--- /dev/null
+++ b/testbed/deepset-ai__haystack/pyproject.toml
@@ -0,0 +1,354 @@
+[build-system]
+requires = ["hatchling>=1.8.0"]
+build-backend = "hatchling.build"
+
+[project]
+name = "haystack-ai"
+dynamic = ["version"]
+description = "LLM framework to build customizable, production-ready LLM applications. Connect components (models, vector DBs, file converters) to pipelines or agents that can interact with your data."
+readme = "README.md"
+license = "Apache-2.0"
+requires-python = ">=3.8"
+authors = [{ name = "deepset.ai", email = "malte.pietsch@deepset.ai" }]
+keywords = [
+ "BERT",
+ "QA",
+ "Question-Answering",
+ "Reader",
+ "Retriever",
+ "albert",
+ "language-model",
+ "mrc",
+ "roberta",
+ "search",
+ "semantic-search",
+ "squad",
+ "transfer-learning",
+ "transformer",
+]
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Science/Research",
+ "License :: Freely Distributable",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+]
+dependencies = [
+ "pandas",
+ "tqdm",
+ "tenacity!=8.4.0",
+ "lazy-imports",
+ "openai>=1.1.0",
+ "Jinja2",
+ "posthog", # telemetry
+ "pyyaml",
+ "more-itertools", # TextDocumentSplitter
+ "networkx", # Pipeline graphs
+ "typing_extensions>=4.7", # typing support for Python 3.8
+ "requests",
+ "numpy",
+ "python-dateutil",
+ "haystack-experimental",
+]
+
+[tool.hatch.envs.default]
+installer = "uv"
+dependencies = [
+ "pre-commit",
+ "ruff",
+ "toml",
+ "reno",
+ # dulwich is a reno dependency, they pin it at >=0.15.0 so pip takes ton of time to resolve the dependency tree.
+ # We pin it here to avoid taking too much time.
+ # https://opendev.org/openstack/reno/src/branch/master/requirements.txt#L7
+ "dulwich>=0.21.0,<1.0.0",
+]
+
+[tool.hatch.envs.default.scripts]
+release-note = "reno new {args}"
+check = "ruff check {args}"
+fix = "ruff check --fix"
+format = "ruff format {args}"
+format-check = "ruff format --check {args}"
+
+[tool.hatch.envs.test]
+extra-dependencies = [
+ "numpy>=2", # Haystack is compatible both with numpy 1.x and 2.x, but we test with 2.x
+
+ "transformers[torch,sentencepiece]==4.44.2", # ExtractiveReader, TransformersSimilarityRanker, LocalWhisperTranscriber, HFGenerators...
+ "huggingface_hub>=0.23.0", # Hugging Face API Generators and Embedders
+ "sentence-transformers>=3.0.0", # SentenceTransformersTextEmbedder and SentenceTransformersDocumentEmbedder
+ "langdetect", # TextLanguageRouter and DocumentLanguageClassifier
+ "openai-whisper>=20231106", # LocalWhisperTranscriber
+ "arrow>=1.3.0", # Jinja2TimeExtension
+
+ # NamedEntityExtractor
+ "spacy>=3.8,<3.9",
+ "spacy-curated-transformers>=0.2,<=0.3",
+ "en-core-web-trf @ https://github.com/explosion/spacy-models/releases/download/en_core_web_trf-3.8.0/en_core_web_trf-3.8.0-py3-none-any.whl",
+
+ # Converters
+ "pypdf", # PyPDFToDocument
+ "pdfminer.six", # PDFMinerToDocument
+ "markdown-it-py", # MarkdownToDocument
+ "mdit_plain", # MarkdownToDocument
+ "tika", # TikaDocumentConverter
+ "azure-ai-formrecognizer>=3.2.0b2", # AzureOCRDocumentConverter
+ "trafilatura", # HTMLToDocument
+ "python-pptx", # PPTXToDocument
+ "python-docx", # DocxToDocument
+ "jq", #JSONConverter
+
+ "nltk", # NLTKDocumentSplitter
+
+ # OpenAPI
+ "jsonref", # OpenAPIServiceConnector, OpenAPIServiceToFunctions
+ "openapi3",
+
+ # Validation
+ "jsonschema",
+
+ # Tracing
+ "opentelemetry-sdk",
+ "ddtrace",
+
+ # Structured logging
+ "structlog",
+
+ # Looking for missing imports
+ "isort",
+ "pyproject-parser",
+
+ # Test
+ "pytest",
+ "pytest-bdd",
+ "pytest-cov",
+ "pytest-custom_exit_code", # used in the CI
+ "pytest-asyncio",
+ "pytest-rerunfailures",
+ "responses",
+ "tox",
+ "coverage",
+ "python-multipart",
+ "psutil",
+ "mypy",
+ "pip", # mypy needs pip to install missing stub packages
+ "pylint",
+ "ipython",
+]
+
+[tool.hatch.envs.test.scripts]
+e2e = "pytest e2e"
+unit = 'pytest --cov-report xml:coverage.xml --cov="haystack" -m "not integration" {args:test}'
+integration = 'pytest --maxfail=5 -m "integration" {args:test}'
+integration-mac = 'pytest --maxfail=5 -m "integration" -k "not tika" {args:test}'
+integration-windows = 'pytest --maxfail=5 -m "integration" -k "not tika" {args:test}'
+types = "mypy --install-types --non-interactive --cache-dir=.mypy_cache/ {args:haystack}"
+lint = "pylint -ry -j 0 {args:haystack}"
+
+[tool.hatch.envs.readme]
+installer = "uv"
+detached = true # To avoid installing the dependencies from the default environment
+dependencies = ["haystack-pydoc-tools"]
+
+[tool.hatch.envs.readme.scripts]
+sync = "./.github/utils/pydoc-markdown.sh"
+delete-outdated = "python ./.github/utils/delete_outdated_docs.py {args}"
+
+[project.urls]
+"CI: GitHub" = "https://github.com/deepset-ai/haystack/actions"
+"Docs: RTD" = "https://haystack.deepset.ai/overview/intro"
+"GitHub: issues" = "https://github.com/deepset-ai/haystack/issues"
+"GitHub: repo" = "https://github.com/deepset-ai/haystack"
+Homepage = "https://github.com/deepset-ai/haystack"
+
+[tool.hatch.version]
+path = "VERSION.txt"
+pattern = "(?P<version>.+)"
+
+[tool.hatch.metadata]
+allow-direct-references = true
+
+[tool.hatch.build.targets.sdist]
+include = ["/haystack", "/VERSION.txt"]
+
+[tool.hatch.build.targets.wheel]
+packages = ["haystack"]
+
+[tool.codespell]
+ignore-words-list = "ans,astroid,nd,ned,nin,ue,rouge,ist"
+quiet-level = 3
+skip = "./test,./e2e"
+
+[tool.pylint.'MESSAGES CONTROL']
+max-line-length = 120
+disable = [
+
+ # To keep
+ "fixme",
+ "c-extension-no-member",
+
+ # To review:
+ "missing-docstring",
+ "unused-argument",
+ "no-member",
+ "line-too-long",
+ "protected-access",
+ "too-few-public-methods",
+ "raise-missing-from",
+ "invalid-name",
+ "duplicate-code",
+ "arguments-differ",
+ "consider-using-f-string",
+ "no-else-return",
+ "attribute-defined-outside-init",
+ "super-with-arguments",
+ "redefined-builtin",
+ "abstract-method",
+ "unspecified-encoding",
+ "unidiomatic-typecheck",
+ "no-name-in-module",
+ "consider-using-with",
+ "redefined-outer-name",
+ "arguments-renamed",
+ "unnecessary-pass",
+ "broad-except",
+ "unnecessary-comprehension",
+ "subprocess-run-check",
+ "singleton-comparison",
+ "consider-iterating-dictionary",
+ "undefined-loop-variable",
+ "consider-using-in",
+ "bare-except",
+ "unexpected-keyword-arg",
+ "simplifiable-if-expression",
+ "use-list-literal",
+ "broad-exception-raised",
+
+ # To review later
+ "cyclic-import",
+ "import-outside-toplevel",
+ "deprecated-method",
+]
+[tool.pylint.'DESIGN']
+max-args = 38 # Default is 5
+max-attributes = 28 # Default is 7
+max-branches = 34 # Default is 12
+max-locals = 45 # Default is 15
+max-module-lines = 2468 # Default is 1000
+max-nested-blocks = 9 # Default is 5
+max-statements = 206 # Default is 50
+[tool.pylint.'SIMILARITIES']
+min-similarity-lines = 6
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+addopts = "--strict-markers"
+markers = [
+ "unit: unit tests",
+ "integration: integration tests",
+
+ "generator: generator tests",
+ "summarizer: summarizer tests",
+ "embedding_dim: uses a document store with non-default embedding dimension (e.g @pytest.mark.embedding_dim(128))",
+
+ "tika: requires Tika container",
+ "parsr: requires Parsr container",
+ "ocr: requires Tesseract",
+
+ "elasticsearch: requires Elasticsearch container",
+ "weaviate: requires Weaviate container",
+ "pinecone: requires Pinecone credentials",
+ "faiss: uses FAISS",
+ "opensearch",
+ "document_store",
+]
+log_cli = true
+
+[tool.mypy]
+warn_return_any = false
+warn_unused_configs = true
+ignore_missing_imports = true
+
+[tool.ruff]
+line-length = 120
+target-version = "py38"
+exclude = [".github", "proposals"]
+
+[tool.ruff.format]
+skip-magic-trailing-comma = true
+
+[tool.ruff.lint]
+isort.split-on-trailing-comma = false
+exclude = ["test/**", "e2e/**"]
+select = [
+ "ASYNC", # flake8-async
+ "C4", # flake8-comprehensions
+ "C90", # McCabe cyclomatic complexity
+ "E501", # Long lines
+ "EXE", # flake8-executable
+ "F", # Pyflakes
+ "INT", # flake8-gettext
+ "PERF", # Perflint
+ "PL", # Pylint
+ "Q", # flake8-quotes
+ "SIM", # flake8-simplify
+ "SLOT", # flake8-slots
+ "T10", # flake8-debugger
+ "W", # pycodestyle
+ "YTT", # flake8-2020
+ "I", # isort
+ # built-in shadowing
+ "A001", # builtin-variable-shadowing
+ "A002", # builtin-argument-shadowing
+ "A003", # builtin-attribute-shadowing
+ # docstring rules
+ "D102", # Missing docstring in public method
+ "D103", # Missing docstring in public function
+ "D209", # Closing triple quotes go to new line
+ "D205", # 1 blank line required between summary line and description
+ "D213", # summary lines must be positioned on the second physical line of the docstring
+ "D417", # undocumented-parameter
+ "D419", # undocumented-returns
+]
+
+ignore = [
+ "F401", # unused-import
+ "PERF203", # `try`-`except` within a loop incurs performance overhead
+ "PERF401", # Use a list comprehension to create a transformed list
+ "PLR1714", # repeated-equality-comparison
+ "PLR5501", # collapsible-else-if
+ "PLW0603", # global-statement
+ "PLW1510", # subprocess-run-without-check
+ "PLW2901", # redefined-loop-name
+ "SIM108", # if-else-block-instead-of-if-exp
+ "SIM115", # open-file-with-context-handler
+ "SIM118", # in-dict-keys
+]
+
+[tool.ruff.lint.mccabe]
+max-complexity = 28
+
+[tool.ruff.lint.per-file-ignores]
+"examples/basic_qa_pipeline.py" = ["C416"]
+"haystack/preview/testing/document_store.py" = ["C416", "F821"]
+"haystack/telemetry.py" = ["F821"]
+
+[tool.ruff.lint.pylint]
+allow-magic-value-types = ["float", "int", "str"]
+max-args = 14 # Default is 5
+max-branches = 21 # Default is 12
+max-public-methods = 20 # Default is 20
+max-returns = 7 # Default is 6
+max-statements = 60 # Default is 50
+
+[tool.coverage.run]
+omit = ["haystack/testing/*"]
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/AnalyzeResult-forward-declaration-5ed1bd9b6dc62c6f.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/AnalyzeResult-forward-declaration-5ed1bd9b6dc62c6f.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..15986221c016da17c64d42eb5b0310fb7403e9ae
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/AnalyzeResult-forward-declaration-5ed1bd9b6dc62c6f.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Forward declaration of `AnalyzeResult` type in `AzureOCRDocumentConverter`.
+
+ `AnalyzeResult` is already imported in a lazy import block.
+ The forward declaration avoids issues when `azure-ai-formrecognizer>=3.2.0b2` is not installed.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/adapt-chat-promp-builder-message-copy-6aa4de39363ffbf2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/adapt-chat-promp-builder-message-copy-6aa4de39363ffbf2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c3a9e049d658405dbeb6fb7d4d033a79f6ebe8ac
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/adapt-chat-promp-builder-message-copy-6aa4de39363ffbf2.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Adapts how ChatPromptBuilder creates ChatMessages. Messages are deep copied to ensure all meta fields are copied correctly.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-azure-kwargs-6a5ab1358ef7f44c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-azure-kwargs-6a5ab1358ef7f44c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19e3bb0b6265e570ae6dca641b8a9f510f782f4d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-azure-kwargs-6a5ab1358ef7f44c.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Expose default_headers to pass custom headers to Azure API including APIM subscription key.
+ - |
+ Add optional azure_kwargs dictionary parameter to pass in parameters undefined in Haystack but supported by AzureOpenAI.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-batch_size_faiss_init-5e97c1fb9409f873.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-batch_size_faiss_init-5e97c1fb9409f873.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7d0956d68221a6ee48f4a0a6465b557954146f81
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-batch_size_faiss_init-5e97c1fb9409f873.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Add batch_size to the __init__ method of FAISS Document Store. This works as the default value for all methods of
+ FAISS Document Store that support batch_size.
\ No newline at end of file
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-csv-converter-5c0d52f180d498f5.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-csv-converter-5c0d52f180d498f5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5a01c94578615a5ad91fc7b2ae11270a18f84de
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-csv-converter-5c0d52f180d498f5.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add a CSV to Document converter component. Loads the file as bytes object. Adds the loaded string as a new document that can be used for further processing by the Document Splitter.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-dimensions-parameter-azureopeaniembedders-ef48fcc7c064b45c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-dimensions-parameter-azureopeaniembedders-ef48fcc7c064b45c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..62277a5f16f4a41ae7228e96be9a75a13c9d76bd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-dimensions-parameter-azureopeaniembedders-ef48fcc7c064b45c.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+    Add dimensions parameter to Azure OpenAI Embedders (AzureOpenAITextEmbedder and AzureOpenAIDocumentEmbedder) to fully support new embedding models like text-embedding-3-small, text-embedding-3-large and upcoming ones
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-diversity-ranker-6ecee21134eda673.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-diversity-ranker-6ecee21134eda673.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0a9dfa90587ca5d64b0fc456c32d2ae6494b1be6
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-diversity-ranker-6ecee21134eda673.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `SentenceTransformersDiversityRanker`.
+ The Diversity Ranker orders documents in such a way as to maximize the overall diversity of the given documents.
+ The ranker leverages sentence-transformer models to calculate semantic embeddings for each document and the query.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12fc6794a4c9f037a1bf905844d965200d9dfce8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add FilterRetriever.
+ It retrieves documents that match the provided (either at init or runtime) filters.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-hf-free-tier-checks-99384060139d5d30.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-hf-free-tier-checks-99384060139d5d30.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a56e4dd60c1e3246479fbaad853c5c8d56f9b4a6
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-hf-free-tier-checks-99384060139d5d30.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Resolves a bug where the HuggingFaceTGIGenerator and HuggingFaceTGIChatGenerator encountered issues if provided
+ with valid models that were not available on the HuggingFace inference API rate-limited tier. The fix, detailed
+ in [GitHub issue #6816](https://github.com/deepset-ai/haystack/issues/6816) and its GitHub PR, ensures these
+ components now correctly handle model availability, eliminating previous limitations.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-logs-empty-files-pdf-f28a14e52984c1ea.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-logs-empty-files-pdf-f28a14e52984c1ea.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd5637dba0d9bac5618b9c6ce7b47922b654e209
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-logs-empty-files-pdf-f28a14e52984c1ea.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add warning logs to the PDFMinerToDocument and PyPDFToDocument to indicate when a processed PDF file has no content.
+ This can happen if the PDF file is a scanned image.
+ Also added an explicit check and warning message to the DocumentSplitter that warns the user that empty Documents are skipped.
+    This behavior was already occurring, but now it's clearer through logs that this is happening.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83c9e57682c051ad7f02f0ca8f2ef725ec41f112
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
@@ -0,0 +1,8 @@
+---
+
+features:
+ - |
+ Add LostInTheMiddleRanker.
+ It reorders documents based on the "Lost in the Middle" order, a strategy that
+ places the most relevant paragraphs at the beginning or end of the context,
+ while less relevant paragraphs are positioned in the middle.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-min-top-k-top-p-sampler-dc65f97d420e70b0.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-min-top-k-top-p-sampler-dc65f97d420e70b0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5173f2db195dd7d74b547e81d98798af584b6227
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-min-top-k-top-p-sampler-dc65f97d420e70b0.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ The parameter min_top_k is added to the TopPSampler which sets the minimum number of documents to be returned when the top-p sampling algorithm results in fewer documents being selected. The documents with the next highest scores are added to the selection. This is useful when we want to guarantee a set number of documents will always be passed on, but allow the Top-P algorithm to still determine if more documents should be sent based on document score.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-model-and-tokenizer-kwargs-4b7618806665f8ba.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-model-and-tokenizer-kwargs-4b7618806665f8ba.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5f0395d94ea644eef184c574419a2a56fa1f13f8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-model-and-tokenizer-kwargs-4b7618806665f8ba.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Adds model_kwargs and tokenizer_kwargs to the components TransformersSimilarityRanker, SentenceTransformersDocumentEmbedder, SentenceTransformersTextEmbedder.
+ This allows passing things like model_max_length or torch_dtype for better management of model inference.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2048fcf8d7419a89ebffe2cfaa68fd03b8d6884e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add support for Mean Reciprocal Rank (MRR) Metric to `StatisticalEvaluator`.
+ MRR measures the mean reciprocal rank of times a label is present in at least one or more predictions.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fba2fdcce32b908bc40e526569fda5952f8365c2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Introducing the OutputAdapter component which enables seamless data flow between pipeline components by adapting the output of one component to match the expected input of another using Jinja2 template expressions. This addition opens the door to greater flexibility in pipeline configurations, facilitating custom adaptation rules and exemplifying a structured approach to inter-component communication.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-pipeline-templates-831f857c6387f8c3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-pipeline-templates-831f857c6387f8c3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eb7f21c19a1db0a00363c48f8f5e716ffa07d8f7
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-pipeline-templates-831f857c6387f8c3.yaml
@@ -0,0 +1,13 @@
+---
+highlights: >
+ Introducing a flexible and dynamic approach to creating NLP pipelines with predefined templates.
+ This innovative feature allows users to easily construct and customize complex data processing pipelines for various
+ NLP tasks.
+
+ For example, the following pipeline template can be used to create an indexing pipeline:
+ ```python
+ from haystack import Pipeline, PredefinedPipeline
+
+ pipe = Pipeline.from_template(PredefinedPipeline.INDEXING)
+ result = pipe.run({"converter": {"sources": ["some_file.txt"]}})
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-streaming-callback-run-param-to-hf-generators-5ebde8fad75cb49f.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-streaming-callback-run-param-to-hf-generators-5ebde8fad75cb49f.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..792a6c32723c61ef42c3e5dabb240e04456594eb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-streaming-callback-run-param-to-hf-generators-5ebde8fad75cb49f.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Add `streaming_callback` run parameter to `HuggingFaceAPIGenerator` and `HuggingFaceLocalGenerator` to allow users to pass a callback function that will be called after each chunk of the response is generated.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-string-joiner-a7754e6bff9332ea.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-string-joiner-a7754e6bff9332ea.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3cbf8f554dba95f2ba884a425d3746da244ccab2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-string-joiner-a7754e6bff9332ea.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Added component StringJoiner to join strings from different components to a list of strings.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-truncate-normalize-tei-6c998b14154267bb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-truncate-normalize-tei-6c998b14154267bb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a76a4b61bb96d3e4ab5d0df1a9aa698370ea78fc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-truncate-normalize-tei-6c998b14154267bb.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+    Adds `truncate` and `normalize` parameters to `HuggingFaceTEITextEmbedder` and `HuggingFaceTEIDocumentEmbedder` for allowing truncation and normalization of embeddings.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-trust-remote-code-feature-c133b2b245d2ea7a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-trust-remote-code-feature-c133b2b245d2ea7a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a4cf31426d1b9bb640ea3a289b3d8949d906df45
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-trust-remote-code-feature-c133b2b245d2ea7a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add trust_remote_code parameter to SentenceTransformersDocumentEmbedder and SentenceTransformersTextEmbedder for allowing custom models and scripts.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-unicode-normalization-and-ascii-mode-to-document-cleaner-ba536b46e499663c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-unicode-normalization-and-ascii-mode-to-document-cleaner-ba536b46e499663c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d4d28ee47bc108c19b1b47076c5e395522c3ca8e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-unicode-normalization-and-ascii-mode-to-document-cleaner-ba536b46e499663c.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Added `unicode_normalization` parameter to the DocumentCleaner, allowing to normalize the text to NFC, NFD, NFKC, or NFKD.
+ - |
+ Added `ascii_only` parameter to the DocumentCleaner, transforming letters with diacritics to their ASCII equivalent and removing other non-ASCII characters.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-usage-huggingfaceapi-generators-de8979cad6b6ec45.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-usage-huggingfaceapi-generators-de8979cad6b6ec45.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..27cde7a3088aa190df6f5655ce0db7c41e05d39f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-usage-huggingfaceapi-generators-de8979cad6b6ec45.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Adds 'usage' meta field with 'prompt_tokens' and 'completion_tokens' keys to HuggingFaceAPIChatGenerator.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-window-size-parameter-runtime-e841c4471f9d6b9c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-window-size-parameter-runtime-e841c4471f9d6b9c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e9d5aa2ed77c8c3fbb565a330fae505af5d36bae
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-window-size-parameter-runtime-e841c4471f9d6b9c.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ The SentenceWindowRetriever now supports the `window_size` parameter at run time, overwriting the value set in the constructor.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/add-zero-shot-document-classifier-3ab1d7bbdc04db05.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/add-zero-shot-document-classifier-3ab1d7bbdc04db05.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82f35c467116265f2200eaece2151dad113d5a27
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/add-zero-shot-document-classifier-3ab1d7bbdc04db05.yaml
@@ -0,0 +1,7 @@
+---
+highlights: >
+ Adds support for zero shot document classification
+features:
+ - |
+ Adds support for zero shot document classification. This allows you to classify documents into user-defined
+ classes (binary and multi-label classification) using pre-trained models from huggingface.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/adjust-max-new-tokens-to-512-in-huggingface-generators-76d9aba116b65e70.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/adjust-max-new-tokens-to-512-in-huggingface-generators-76d9aba116b65e70.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1103cfb613b56ab9febbfd311ca920945e064752
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/adjust-max-new-tokens-to-512-in-huggingface-generators-76d9aba116b65e70.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Set max_new_tokens default to 512 in Hugging Face generators.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/async-component-support-machinery-6ea4496241aeb3b2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/async-component-support-machinery-6ea4496241aeb3b2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5dc43dccd212a8c87815d27fde68ee575bbc22e3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/async-component-support-machinery-6ea4496241aeb3b2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Extend core component machinery to support an optional asynchronous `run_async` method in components.
+ If it's present, it should have the same parameters (and output types) as the run method and must be
+ implemented as a coroutine.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/azure-ocr-converter-enhancements-c882456cad9a5efc.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/azure-ocr-converter-enhancements-c882456cad9a5efc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dcce43c091d2dea746ad2b03cb763fb447b41a37
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/azure-ocr-converter-enhancements-c882456cad9a5efc.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Enhanced the AzureOCRDocumentConverter to include advanced handling of tables and text. Features such as extracting preceding and following context for tables, merging multiple column headers, and enabling single column page layout for text have been introduced. This update furthers the flexibility and accuracy of document conversion within complex layouts.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/better-deserialization-errors-f2b0e226534f4cd2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/better-deserialization-errors-f2b0e226534f4cd2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af2ea1986c5cf14bed3aaeb628627e94a490c64f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/better-deserialization-errors-f2b0e226534f4cd2.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Improved error messages for deserialization errors.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/change-default-openai-model-99ed5477500196e2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/change-default-openai-model-99ed5477500196e2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c4c36b33ff4fa1e90964bd861a53de96836e532e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/change-default-openai-model-99ed5477500196e2.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ `gpt-3.5-turbo` was replaced by `gpt-4o-mini` as the default model for all components relying on OpenAI API
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/chat-with-website-template-23bec121f1a78726.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/chat-with-website-template-23bec121f1a78726.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2ba30b4148ec89d660df09801f00081b651fdfb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/chat-with-website-template-23bec121f1a78726.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ Add a new pipeline template `PredefinedPipeline.CHAT_WITH_WEBSITE` to quickly create a pipeline
+ that will answer questions based on data collected from one or more web pages.
+
+ Usage example:
+ ```python
+ from haystack import Pipeline, PredefinedPipeline
+
+ pipe = Pipeline.from_template(PredefinedPipeline.CHAT_WITH_WEBSITE)
+ result = pipe.run({
+ "fetcher": {"urls": ["https://haystack.deepset.ai/overview/quick-start"]},
+ "prompt": {"query": "How should I install Haystack?"}}
+ )
+
+ print(result["llm"]["replies"][0])
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3709c2e21fa43b5c7895afb0e9ab21b9cf1b815
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
@@ -0,0 +1,89 @@
+---
+features:
+ - |
+ Added option to instrument pipeline and component runs.
+ This allows users to observe their pipeline runs and component runs in real-time via their chosen observability
+ tool. Out-of-the-box support for OpenTelemetry and Datadog will be added in separate contributions.
+
+ Example usage for [OpenTelemetry](https://opentelemetry.io/docs/languages/python/):
+
+ 1. Install OpenTelemetry SDK and exporter:
+ ```bash
+ pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+ ```
+
+ 2. Configure OpenTelemetry SDK with your tracing provider and exporter:
+ ```python
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+ from opentelemetry import trace
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+ # Service name is required for most backends
+ resource = Resource(attributes={
+ SERVICE_NAME: "haystack"
+ })
+
+ traceProvider = TracerProvider(resource=resource)
+ processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
+ traceProvider.add_span_processor(processor)
+ trace.set_tracer_provider(traceProvider)
+
+ tracer = traceProvider.get_tracer("my_application")
+  ```
+
+ 3. Create tracer
+ ```python
+ import contextlib
+ from typing import Optional, Dict, Any, Iterator
+
+ from opentelemetry import trace
+ from opentelemetry.trace import NonRecordingSpan
+
+ from haystack.tracing import Tracer, Span
+ from haystack.tracing import utils as tracing_utils
+ import opentelemetry.trace
+
+
+ class OpenTelemetrySpan(Span):
+ def __init__(self, span: opentelemetry.trace.Span) -> None:
+ self._span = span
+
+ def set_tag(self, key: str, value: Any) -> None:
+ coerced_value = tracing_utils.coerce_tag_value(value)
+ self._span.set_attribute(key, coerced_value)
+
+
+ class OpenTelemetryTracer(Tracer):
+ def __init__(self, tracer: opentelemetry.trace.Tracer) -> None:
+ self._tracer = tracer
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ with self._tracer.start_as_current_span(operation_name) as span:
+ span = OpenTelemetrySpan(span)
+ if tags:
+ span.set_tags(tags)
+
+ yield span
+
+ def current_span(self) -> Optional[Span]:
+ current_span = trace.get_current_span()
+ if isinstance(current_span, NonRecordingSpan):
+ return None
+
+ return OpenTelemetrySpan(current_span)
+
+ ```
+
+ 4. Use the tracer with Haystack:
+ ```python
+ from haystack import tracing
+
+ haystack_tracer = OpenTelemetryTracer(tracer)
+ tracing.enable_tracing(haystack_tracer)
+ ```
+
+ 5. Run your pipeline
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/compatibility-hh-hub-0.26.0-a5b61f7bf99a97b7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/compatibility-hh-hub-0.26.0-a5b61f7bf99a97b7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f12d1c3dc829fbbb8b5cad6b653b78212fb92c15
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/compatibility-hh-hub-0.26.0-a5b61f7bf99a97b7.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Adjusted a test on `HuggingFaceAPIGenerator` to ensure compatibility with the `huggingface_hub==0.26.0`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/complex-types-openapi-support-84d3daf8927ad915.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/complex-types-openapi-support-84d3daf8927ad915.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2041b74081174c5c1143b86de70d0b6dca8fe86d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/complex-types-openapi-support-84d3daf8927ad915.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Enhanced OpenAPI integration by handling complex types of requests and responses in OpenAPIServiceConnector and OpenAPIServiceToFunctions.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/component-greedy-d6630af901e96a4c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/component-greedy-d6630af901e96a4c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c973a150d362029163f5c30813098712f658c566
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/component-greedy-d6630af901e96a4c.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ Add `is_greedy` argument to `@component` decorator.
+ This flag will change the behaviour of `Component`s with inputs that have a `Variadic` type
+ when running inside a `Pipeline`.
+
+ Variadic `Component`s that are marked as greedy will run as soon as they receive their first input.
+ If not marked as greedy instead they'll wait as long as possible before running to make sure they
+ receive as many inputs as possible from their senders.
+
+ It will be ignored for all other `Component`s even if set explicitly.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/component-repr-a6486af81530bc3b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a7439e9266298b8feba2595b12d30661ba45445
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Add `__repr__` to all Components to print their I/O.
+ This can also be useful in Jupyter notebooks as this will be shown as a cell output
+    if it's the last expression in a cell.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/component-set-input-type-constraints-0b1130c1b49d9648.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/component-set-input-type-constraints-0b1130c1b49d9648.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fa7c3bc06cab784812f143537aa7e07f9bad52e3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/component-set-input-type-constraints-0b1130c1b49d9648.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Add constraints to `component.set_input_type` and `component.set_input_types` to prevent undefined behaviour
+ when the `run` method does not contain a variadic keyword argument.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/component-set-output-type-override-852a19b3f0621fb0.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/component-set-output-type-override-852a19b3f0621fb0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a06faddebff161d431dffd3dbd4ce5676d81542
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/component-set-output-type-override-852a19b3f0621fb0.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+    Prevent `set_output_types` from being called when the `output_types` decorator is used.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/conditional-routes-validation-b46fc506d35894d4.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/conditional-routes-validation-b46fc506d35894d4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..315510b2821560f3cb7e1ece2acc7d3f53be2458
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/conditional-routes-validation-b46fc506d35894d4.yaml
@@ -0,0 +1,7 @@
+---
+enhancements:
+ - |
+ Add output type validation in `ConditionalRouter`.
+ Setting `validate_output_type` to `True` will enable a check to verify if
+ the actual output of a route returns the declared type.
+ If it doesn't match a `ValueError` is raised.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/content-tracing-25df3909e11924b1.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/content-tracing-25df3909e11924b1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff290e9ec7c9b36b287d38f868590270b931e621
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/content-tracing-25df3909e11924b1.yaml
@@ -0,0 +1,12 @@
+---
+enhancements:
+ - |
+ Allow code instrumentation to also trace the input and output of components.
+ This is useful for debugging and understanding the behavior of components.
+ This behavior is disabled by default and can be enabled with one of the following methods:
+ - Set the environment variable `HAYSTACK_CONTENT_TRACING_ENABLED_ENV_VAR` to `true` before importing Haystack.
+ - Enable content tracing in the code:
+ ```python
+ from haystack import tracing
+ tracing.tracer.is_content_tracing_enabled = True
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/datadog-tracer-b084cf64fcc575c6.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/datadog-tracer-b084cf64fcc575c6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..58d24fe537063829c91f302989b673ea5e6b218c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/datadog-tracer-b084cf64fcc575c6.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ Added out-of-the-box support for the Datadog Tracer. This allows you to instrument pipeline and component
+ runs using Datadog and send traces to your preferred backend.
+
+ To use the Datadog Tracer you need to have the `ddtrace` package installed in your environment.
+ To instruct Haystack to use the Datadog tracer, you have multiple options:
+
+ * Run your Haystack application using the `ddtrace` command line tool as described in the
+    [ddtrace documentation](https://ddtrace.readthedocs.io/en/stable/installation_quickstart.html#tracing).
+ This behavior can be disabled by setting the `HAYSTACK_AUTO_TRACE_ENABLED_ENV_VAR` environment variable to `false`.
+ * Configure the tracer manually in your code using the `ddtrace` package:
+ ```python
+ from haystack.tracing import DatadogTracer
+    from haystack import tracing
+ import ddtrace
+
+ tracer = ddtrace.tracer
+ tracing.enable_tracing(DatadogTracer(tracer))
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/delete-old-eval-api-d83b3b0c83f847f3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/delete-old-eval-api-d83b3b0c83f847f3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..232324f27479e267d0fdb7286d8b1cb99564ac1b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/delete-old-eval-api-d83b3b0c83f847f3.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Remove the old evaluation API in favor of a Component based API.
+ We now have `SASEvaluator` and `StatisticalEvaluator` replacing the old API.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b861068a3882243055993f65afce04f7f592afc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Remove `ChatMessage.to_openai_format` method. Use `haystack.components.generators.openai_utils._convert_message_to_openai_format` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-debug-path-976bb5a851365233.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-debug-path-976bb5a851365233.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e80ab8aa9303d11d055e9956714a6bde6a1284e8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-debug-path-976bb5a851365233.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - |
+    `Pipeline` init argument `debug_path` is deprecated and will be removed in version `2.7.0`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-greedy-argument-4b8c39572f5df25c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-greedy-argument-4b8c39572f5df25c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c868a9a1930eb35a69c75d7933ae8a8b8826437e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-greedy-argument-4b8c39572f5df25c.yaml
@@ -0,0 +1,13 @@
+---
+enhancements:
+ - |
+ Add new `GreedyVariadic` input type. This has a similar behaviour to `Variadic` input type
+ as it can be connected to multiple output sockets, though the Pipeline will run it as soon
+ as it receives an input without waiting for others.
+ This replaces the `is_greedy` argument in the `@component` decorator.
+ If you had a Component with a `Variadic` input type and `@component(is_greedy=True)` you need
+    to change the type to `GreedyVariadic` and remove `is_greedy=True` from `@component`.
+deprecations:
+ - |
+ `@component` decorator `is_greedy` argument is deprecated and will be removed in version `2.7.0`.
+ Use `GreedyVariadic` type instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-legacy-filters-7c9530644d972089.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-legacy-filters-7c9530644d972089.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eeb29132f64509d63a0cacfd0cc15c72e782a06e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-legacy-filters-7c9530644d972089.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - |
+ Haystack 1.x legacy filters are deprecated and will be removed in a future release. Please use the new
+ filter style as described in the documentation - https://docs.haystack.deepset.ai/docs/metadata-filtering
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-method-to-openai-format-fb2538cf3f610d2a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-method-to-openai-format-fb2538cf3f610d2a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f42c7ac92d27d4cfc2a2ca49d69cd76831b27bad
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-method-to-openai-format-fb2538cf3f610d2a.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ Deprecate the method `to_openai_format` of the `ChatMessage` dataclass.
+ This method was never intended to be public and was only used internally.
+ Now, each Chat Generator will know internally how to convert the messages to
+ the format of their specific provider.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-self-connection-665647384ae2792b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-self-connection-665647384ae2792b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a19a292dc25937c4b2926a5008168e910a6598c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-self-connection-665647384ae2792b.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - |
+    Deprecate connecting a Component to itself when calling `Pipeline.connect()`; this will raise an error from version `2.7.0` onwards
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-sentence-window-retrieval-9d7f8a61429a514b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-sentence-window-retrieval-9d7f8a61429a514b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0bcc5bf533d1434df764d9509e5d571dfdce5da7
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate-sentence-window-retrieval-9d7f8a61429a514b.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - |
+    `SentenceWindowRetrieval` is deprecated and will be removed in the future. Use `SentenceWindowRetriever` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecate_max_loops_allowed-2b17a9b442b68199.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate_max_loops_allowed-2b17a9b442b68199.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6cce5d86484f3bd3e9c4cf202cd72eab1f051aa1
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecate_max_loops_allowed-2b17a9b442b68199.yaml
@@ -0,0 +1,12 @@
+---
+enhancements:
+ - |
+    Add new `Pipeline` init argument `max_runs_per_component`, this has identical
+    behaviour to the existing `max_loops_allowed` argument but is more descriptive of its actual effects.
+ - |
+    Add new `PipelineMaxComponentRuns` exception to reflect the new `max_runs_per_component` init argument
+deprecations:
+ - |
+    `Pipeline` init argument `max_loops_allowed` is deprecated and will be removed in version `2.7.0`. Use `max_runs_per_component` instead.
+ - |
+    `PipelineMaxLoops` exception is deprecated and will be removed in version `2.7.0`. Use `PipelineMaxComponentRuns` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/deprecated-gpt-3.5-as-default-model-d03cafe1fdc2ada2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/deprecated-gpt-3.5-as-default-model-d03cafe1fdc2ada2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6dd7ba5dff13668bc8fa529ccc0408f839b80e44
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/deprecated-gpt-3.5-as-default-model-d03cafe1fdc2ada2.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - |
+    The default model for the `OpenAIGenerator` and `OpenAIChatGenerator`, 'gpt-3.5-turbo', will be replaced by 'gpt-4o-mini'.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/docstore-deserialization-in-init-params-a123a39d5fbc957f.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/docstore-deserialization-in-init-params-a123a39d5fbc957f.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1582b9b74c3630decfd271a3dc9d2e96e59644da
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/docstore-deserialization-in-init-params-a123a39d5fbc957f.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+    Introduce a utility function to deserialize a generic Document Store
+ from the init_parameters of a serialized component.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/document-ndcg-evaluator-d579f51dd76ae76a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/document-ndcg-evaluator-d579f51dd76ae76a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eae5d3207981771aa8d00d571304b6347f6f412a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/document-ndcg-evaluator-d579f51dd76ae76a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Added a new component DocumentNDCGEvaluator, which is similar to DocumentMRREvaluator and useful for retrieval evaluation. It calculates the normalized discounted cumulative gain, an evaluation metric useful when there are multiple ground truth relevant documents and the order in which they are retrieved is important.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/docx-extract-page-breaks-09279c95f73a06f5.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/docx-extract-page-breaks-09279c95f73a06f5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8aacfeaf0c18cd83025936147b3de2eaa0332e4f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/docx-extract-page-breaks-09279c95f73a06f5.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixed an issue where page breaks were not being extracted from DOCX files.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/docx-para-forwardref-31941f54ab3b679f.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/docx-para-forwardref-31941f54ab3b679f.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..46dc52bb059676bb808fcf66cc134db1b7db33bb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/docx-para-forwardref-31941f54ab3b679f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Use a forward reference for the `Paragraph` class in the `DOCXToDocument` converter
+ to prevent import errors.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/docxmetadata-json-serializable-e4205ce70d042aed.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/docxmetadata-json-serializable-e4205ce70d042aed.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a08095bf6aec8de6d062ea8641ed73af85003c0
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/docxmetadata-json-serializable-e4205ce70d042aed.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ The metadata produced by `DOCXToDocument` component is now JSON serializable.
+ Previously, it contained `datetime` objects automatically extracted from DOCX files, which are not JSON serializable.
+ Now, the `datetime` objects are converted to strings.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/dynamic-chat-prompt-templating-enhancement-e8d36cbf3be46da7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/dynamic-chat-prompt-templating-enhancement-e8d36cbf3be46da7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a94b688a17c4166a09b7952fb20afb86c657ce29
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/dynamic-chat-prompt-templating-enhancement-e8d36cbf3be46da7.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Enhanced DynamicChatPromptBuilder's capabilities by allowing all user and system messages to be templated with provided variables. This update ensures a more versatile and dynamic templating process, making chat prompt generation more efficient and customized to user needs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/enforce-python-types-serde-0c5cd0a716bab7d0.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/enforce-python-types-serde-0c5cd0a716bab7d0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..79e3db407425ff1a8f454173e8517769055470e3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/enforce-python-types-serde-0c5cd0a716bab7d0.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Improved serialization/deserialization errors to provide extra context about the delinquent components when possible.
+
+fixes:
+ - |
+ Serialized data of components are now explicitly enforced to be one of the following basic Python datatypes: str, int, float, bool, list, dict, set, tuple or None.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/enhance-docx-table-extraction-3232d3059d220550.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-docx-table-extraction-3232d3059d220550.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c7c52e0dddad03486921e5dd52ba379d42ca3d5
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-docx-table-extraction-3232d3059d220550.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Enhanced DOCX converter to support table extraction in addition to paragraph content. The converter supports both CSV and Markdown table formats, providing flexible options for representing tabular data extracted from DOCX documents.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/enhance-numpy-dependency-reduced-f8a8afa08be098d3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-numpy-dependency-reduced-f8a8afa08be098d3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc72e68db641cf6114e7519ce7e46dd8dfe0ed8e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-numpy-dependency-reduced-f8a8afa08be098d3.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Reduced numpy usage to speed up imports.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/enhance-pipeline-draw-5fe3131db71f6f54.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-pipeline-draw-5fe3131db71f6f54.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8f586b90fba04d4ccda30b0fd1b66ccf57e72ec4
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-pipeline-draw-5fe3131db71f6f54.yaml
@@ -0,0 +1,7 @@
+---
+enhancements:
+ - |
+    Add new `Pipeline.show()` method to generate the image inline if run in a Jupyter notebook.
+ If called outside a notebook it will raise a `PipelineDrawingError`.
+ `Pipeline.draw()` has also been simplified and the `engine` argument has been removed.
+ Now all images will be generated using Mermaid.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9f1914efcf0c3a173af42df6eaed5f1a8003aa2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Customize `Pipeline.__repr__()` to return a nice text representation of it.
+ If run on a Jupyter notebook it will instead have the same behaviour as `Pipeline.show()`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5279d0d9c8814647a981f5d300ffc0ce95b2c969
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new FaithfulnessEvaluator component that can be used to evaluate faithfulness / groundedness / hallucinations of LLMs in a RAG pipeline.
+ Given a question, a list of retrieved document contents (contexts), and a predicted answer, FaithfulnessEvaluator returns a score ranging from 0 (poor faithfulness) to 1 (perfect faithfulness).
+    The score is the proportion of statements in the predicted answer that could be inferred from the documents.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/feat-documentsplitter-add-split-by-function-77501f439b63bb49.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/feat-documentsplitter-add-split-by-function-77501f439b63bb49.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e8b170442abdb39582594bc7e494c29bbbac18af
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/feat-documentsplitter-add-split-by-function-77501f439b63bb49.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added the option to use a custom splitting function in DocumentSplitter. The function must accept a string as
+ input and return a list of strings, representing the split units. To use the feature initialise `DocumentSplitter`
+ with `split_by="function"` providing the custom splitting function as `splitting_function=custom_function`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/feature-regex-mime-type-support-36381e74f593eb84.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/feature-regex-mime-type-support-36381e74f593eb84.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e22b30602537f5ecd8d5ce690623380b30b52f39
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/feature-regex-mime-type-support-36381e74f593eb84.yaml
@@ -0,0 +1,24 @@
+---
+enhancements:
+ - |
+ Enhanced FileTypeRouter with Regex Pattern Support for MIME Types: This introduces a significant enhancement to the `FileTypeRouter`, now featuring support for regex pattern matching for MIME types. This powerful addition allows for more granular control and flexibility in routing files based on their MIME types, enabling the handling of broad categories or specific MIME type patterns with ease. This feature is particularly beneficial for applications requiring sophisticated file classification and routing logic.
+
+ Usage example:
+ ```python
+ from haystack.components.routers import FileTypeRouter
+
+ router = FileTypeRouter(mime_types=[r"text/.*", r"application/(pdf|json)"])
+
+ # Example files to classify
+ file_paths = [
+ Path("document.pdf"),
+ Path("report.json"),
+ Path("notes.txt"),
+ Path("image.png"),
+ ]
+
+ result = router.run(sources=file_paths)
+
+ for mime_type, files in result.items():
+ print(f"MIME Type: {mime_type}, Files: {[str(file) for file in files]}")
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/file-router-additional-mime-types-47fe57e6816b83da.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/file-router-additional-mime-types-47fe57e6816b83da.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d5b8a9d997bec2ac0b23fd9f3597380c358f7e2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/file-router-additional-mime-types-47fe57e6816b83da.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ Added a new parameter `additional_mimetypes` to the FileTypeRouter
+ component.
+
+    This allows users to specify additional MIME type mappings,
+    ensuring correct
+    file classification across different
+    runtime environments and Python
+    versions.
+enhancements:
+ - |
+ Improved file type detection in FileTypeRouter, particularly for Microsoft
+ Office file formats like .docx and .pptx. This enhancement ensures more
+ consistent behavior across different environments, including AWS Lambda
+ functions and systems without pre-installed office suites.
+fixes:
+ - |
+ Addressed an issue where certain file types (e.g., .docx, .pptx) were
+ incorrectly classified as 'unclassified' in environments with limited
+ MIME type definitions, such as AWS Lambda functions.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-prompt-builder-from-dict-template-none-56c91effe61e823c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-prompt-builder-from-dict-template-none-56c91effe61e823c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cb1ae0f387d9ed69209f84d8109ec83eec2d294
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-prompt-builder-from-dict-template-none-56c91effe61e823c.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix ChatPromptBuilder from_dict method when template value is None.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-web-pipe-template-0b00144b1099cda3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-web-pipe-template-0b00144b1099cda3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60b2f90a411c8218748ff812683e54fe774b523b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-chat-web-pipe-template-0b00144b1099cda3.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Update the `CHAT_WITH_WEBSITE` Pipeline template to reflect the changes in
+ the `HTMLToDocument` converter component.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-component-type-checking-issues-8eefa1157ffc3eba.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-component-type-checking-issues-8eefa1157ffc3eba.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a8e3aaf016390e148ab7838ad9f2b68555e63eed
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-component-type-checking-issues-8eefa1157ffc3eba.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Update `Component` protocol to fix type checking issues with some Language Servers.
+ Most Language Servers and some type checkers would show warnings when calling `Pipeline.add_component()`
+ as technically most `Component`s weren't respecting the protocol we defined.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-components-autocompletion-a2d87708309262b6.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-components-autocompletion-a2d87708309262b6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..393abbc0e6fbe17b620bcda457a07c540ecb0846
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-components-autocompletion-a2d87708309262b6.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix auto-complete never working for any Component
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-cleaner-page-tag-15c66d6433b82b0a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-cleaner-page-tag-15c66d6433b82b0a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b25bbb662c98df849dd7ab999fdb3f06166316bd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-cleaner-page-tag-15c66d6433b82b0a.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+    Fix the DocumentCleaner removing the `\f` tag from content, which prevented page numbers from being counted (by the Splitter, for example).
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-store-in-test-3640b8b079ff4539.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-store-in-test-3640b8b079ff4539.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0f9a65496979586d5e8a95b178bef15522dfc73
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-document-store-in-test-3640b8b079ff4539.yaml
@@ -0,0 +1,9 @@
+---
+
+fixes:
+ - |
+ The `test_comparison_in` test case in the base document store tests used to
+ always pass, no matter how the `in` filtering logic was implemented in
+ document stores. With the fix, the `in` logic is actually tested. Some tests
+ might start to fail for document stores that don't implement the `in` filter
+ correctly.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-linkcontentfetcher-encoding-6c8df3c5b09fbc50.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-linkcontentfetcher-encoding-6c8df3c5b09fbc50.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3dbf59254bfdc2a3f8dc24f1b45420300c4e4c1b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-linkcontentfetcher-encoding-6c8df3c5b09fbc50.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+    Fix the encoding of HTML files in `LinkContentFetcher`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-logger-reserved-attributes-9e4d4920c1e41a62.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-logger-reserved-attributes-9e4d4920c1e41a62.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..841c6fe41233cde0ac94983ab2937c216803415f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-logger-reserved-attributes-9e4d4920c1e41a62.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Remove the usage of reserved keywords in the logger calls, causing a `KeyError` when setting the log level
+ to DEBUG.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-logs-containing-json-1393a00b4904f996.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-logs-containing-json-1393a00b4904f996.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..618dbc78e195c6031068f0dd62ca79d200bae44f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-logs-containing-json-1393a00b4904f996.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixes logs containing JSON data getting lost due to string interpolation.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-66ce13191e596214.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-66ce13191e596214.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e590bcc4ae0752817ec050dbc5d640c2f9690583
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-66ce13191e596214.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix a bug in the `MetaFieldRanker` where the `weight` parameter passed to the `run` method was not being used.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-method-e4e11011a8b99c34.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-method-e4e11011a8b99c34.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ca3b03a543c34d589fac175f6025fc7b5160bdd3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-metafieldranker-weight-in-run-method-e4e11011a8b99c34.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixed a bug in the `MetaFieldRanker`: when the `weight` parameter was set to 0 in the `run` method,
+ the component was incorrectly using the default `weight` parameter set in the `__init__` method.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-openai-base-url-assignment-0570a494d88fe365.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-openai-base-url-assignment-0570a494d88fe365.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6959e7493720f6b3ee979cb46594e64795ecfae9
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-openai-base-url-assignment-0570a494d88fe365.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+    Adds `api_base_url` attribute to `OpenAITextEmbedder`.
+ Previously, it was used only for initialization and was not serialized.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-output-adapter-from_dict-custom-filters-none-70705cbb474fe982.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-output-adapter-from_dict-custom-filters-none-70705cbb474fe982.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72759e9c69e964487119f2e3b7ef6fe99557a6f6
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-output-adapter-from_dict-custom-filters-none-70705cbb474fe982.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix Output Adapter from_dict method when custom_filters value is None.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipe-rendering-50261e0472f0d267.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipe-rendering-50261e0472f0d267.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..59bb282f5f6e4a09d38a5a24ee811ced6d594c10
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipe-rendering-50261e0472f0d267.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix the Pipeline visualization issue due to changes in the new release of Mermaid.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipeline-run-disorder-382da1e6bd6db510.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipeline-run-disorder-382da1e6bd6db510.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5255e6038e873652200432561635661187d8734
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-pipeline-run-disorder-382da1e6bd6db510.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes `Pipeline.run()` logic so Components that have all their inputs with a default are run in the correct order.
+    This happened because we gather a list of Components to run internally when running the Pipeline in the order they are
+ added during creation of the Pipeline.
+ This caused some Components to run before they received all their inputs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-run-loop-63bf0ffc26887e66.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-run-loop-63bf0ffc26887e66.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ec3525cd8af1b04c3b5e131c236bb830d87e2fb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-run-loop-63bf0ffc26887e66.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fix a bug in `Pipeline.run()` that would cause it to get stuck in an infinite loop and never return.
+
+ This was caused by Components waiting forever for their inputs when parts of the Pipeline graph are skipped
+    because of a "decision" Component not returning outputs for that side of the Pipeline.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-sentence-window-retriever-filter-b7b136eedf6e8488.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-sentence-window-retriever-filter-b7b136eedf6e8488.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e29f416d2626ed8790690a9fc8ec62d445cc535
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-sentence-window-retriever-filter-b7b136eedf6e8488.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+    Fixed the filters in the `SentenceWindowRetriever`, adding support for 3 more Document Stores: Astra, PGVector, and Qdrant
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-tika-page_number-2d600b2dc8a4faa7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-tika-page_number-2d600b2dc8a4faa7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e66f4229585418298308e94c7cd6032cfbe00291
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-tika-page_number-2d600b2dc8a4faa7.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ `TikaDocumentConverter` now returns page breaks ("\f") in the output.
+ This only works for PDF files.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/fix-variadics-not-running-222b01ae44a4a9fc.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/fix-variadics-not-running-222b01ae44a4a9fc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d4f1bdfcf695f448b919c3c78a487143ace1e731
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/fix-variadics-not-running-222b01ae44a4a9fc.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix `Pipeline` not running Components with Variadic input even if it received inputs only from a subset of its senders
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hf-models-from-dict-default-values-47c2c73136ea6643.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hf-models-from-dict-default-values-47c2c73136ea6643.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71b09ebf7f0ebda4b273ef104d8d567019f216fc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hf-models-from-dict-default-values-47c2c73136ea6643.yaml
@@ -0,0 +1,3 @@
+fixes:
+ - |
+ This updates the components, TransformersSimilarityRanker, SentenceTransformersDiversityRanker, SentenceTransformersTextEmbedder, SentenceTransformersDocumentEmbedder and LocalWhisperTranscriber from_dict methods to work when loading with init_parameters only containing required parameters.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hf-tei-bug-fix-07732c672600aadd.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hf-tei-bug-fix-07732c672600aadd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7fd5fd1787757b51f3bbcac95ba288cbd0f0ce76
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hf-tei-bug-fix-07732c672600aadd.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes `HuggingFaceTEITextEmbedder` returning an embedding of incorrect shape when used with a
+ Text-Embedding-Inference endpoint deployed using Docker.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hfapidocembedder-4c3970d002275edb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hfapidocembedder-4c3970d002275edb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a5db689796619555efbd11a9d46969e60373a99
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hfapidocembedder-4c3970d002275edb.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Introduce `HuggingFaceAPIDocumentEmbedder`.
+ This component can be used to compute Document embeddings using different Hugging Face APIs:
+ - free Serverless Inference API
+ - paid Inference Endpoints
+ - self-hosted Text Embeddings Inference.
+ This embedder will replace the `HuggingFaceTEIDocumentEmbedder` in the future.
+deprecations:
+ - |
+ Deprecate `HuggingFaceTEIDocumentEmbedder`. This component will be removed in Haystack 2.3.0.
+ Use `HuggingFaceAPIDocumentEmbedder` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hfapigen-forwardref-5c06090282557195.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hfapigen-forwardref-5c06090282557195.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a6075e19475fd282f646aa6f30fb090b8e4ad16a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hfapigen-forwardref-5c06090282557195.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Use forward references for Hugging Face Hub types in the `HuggingFaceAPIGenerator` component
+ to prevent import errors.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hfapigenerator-3b1c353a4e8e4c55.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hfapigenerator-3b1c353a4e8e4c55.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7607bf379ace678c66bb6813e86b63a3e81f827b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hfapigenerator-3b1c353a4e8e4c55.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Introduce `HuggingFaceAPIGenerator`. This text-generation component supports different Hugging Face APIs:
+ - free Serverless Inference API
+ - paid Inference Endpoints
+ - self-hosted Text Generation Inference.
+
+ This generator will replace the `HuggingFaceTGIGenerator` in the future.
+deprecations:
+ - |
+ Deprecate `HuggingFaceTGIGenerator`. This component will be removed in Haystack 2.3.0.
+ Use `HuggingFaceAPIGenerator` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/hugging-face-local-generator-streaming-callback-38a77d37199f9672.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/hugging-face-local-generator-streaming-callback-38a77d37199f9672.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47b1f6776eb2df524459257ac30fa002cdc2e0fb
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/hugging-face-local-generator-streaming-callback-38a77d37199f9672.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds 'streaming_callback' parameter to 'HuggingFaceLocalGenerator', allowing users to handle streaming responses.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/implement-merge-filter-logic-99e6785a78f80ae9.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/implement-merge-filter-logic-99e6785a78f80ae9.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c90479c2c61724a4ca5853b932c44643b641d54a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/implement-merge-filter-logic-99e6785a78f80ae9.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Enhanced filter application logic to support merging of filters. It facilitates more precise retrieval filtering, allowing for both init and runtime complex filter combinations with logical operators. For more details see https://docs.haystack.deepset.ai/docs/metadata-filtering
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/improve-streaming-callbacks-openai-b6c0b108f2de4142.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/improve-streaming-callbacks-openai-b6c0b108f2de4142.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3df9559f64976022901f54ce419688cdbbbbf846
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/improve-streaming-callbacks-openai-b6c0b108f2de4142.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ The `streaming_callback` parameter can be passed to OpenAIGenerator and OpenAIChatGenerator during pipeline run. This prevents the need to recreate pipelines for streaming callbacks.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/internal-logging-changes-a96533da55e309c3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/internal-logging-changes-a96533da55e309c3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2bcf0d6626b3799c72a294230ab19c255efc0c3e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/internal-logging-changes-a96533da55e309c3.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Added a new `Logger` implementation which eases and enforces logging via key-word arguments.
+ This is an internal change only. The behavior of instances created via `logging.getLogger` is not affected.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/introduce-answer-joiner-component-885dd7846776f5cb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/introduce-answer-joiner-component-885dd7846776f5cb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1773183d3dc8597db0bf0310f2a602453479104
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/introduce-answer-joiner-component-885dd7846776f5cb.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Introduced a new AnswerJoiner component that allows joining multiple lists of Answers into a single list using
+ the Concatenate join mode.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/introduce-jsonschema-validator-65debc51a3b64975.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/introduce-jsonschema-validator-65debc51a3b64975.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..915b24bcdf16a7d5d361a23f99d1239087479ea5
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/introduce-jsonschema-validator-65debc51a3b64975.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Introduced JsonSchemaValidator to validate the JSON content of ChatMessage against a provided JSON schema. Valid messages are emitted through the 'validated' output, while messages failing validation are sent via the 'validation_error' output, along with useful error details for troubleshooting.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/json-converter-a747e9c44543dfb5.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/json-converter-a747e9c44543dfb5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8a269d2c0cded144dc738fec18ffeab5d11aed4
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/json-converter-a747e9c44543dfb5.yaml
@@ -0,0 +1,48 @@
+---
+features:
+ - |
+ Add new `JSONConverter` Component to convert JSON files to `Document`.
+ Optionally it can use jq to filter the source JSON files and extract only specific parts.
+
+ ```python
+ import json
+
+ from haystack.components.converters import JSONConverter
+ from haystack.dataclasses import ByteStream
+
+ data = {
+ "laureates": [
+ {
+ "firstname": "Enrico",
+ "surname": "Fermi",
+ "motivation": "for his demonstrations of the existence of new radioactive elements produced "
+ "by neutron irradiation, and for his related discovery of nuclear reactions brought about by slow neutrons",
+ },
+ {
+ "firstname": "Rita",
+ "surname": "Levi-Montalcini",
+ "motivation": "for their discoveries of growth factors",
+ },
+ ],
+ }
+ source = ByteStream.from_string(json.dumps(data))
+ converter = JSONConverter(
+ jq_schema=".laureates[]", content_key="motivation", extra_meta_fields=["firstname", "surname"]
+ )
+
+ results = converter.run(sources=[source])
+ documents = results["documents"]
+ print(documents[0].content)
+ # 'for his demonstrations of the existence of new radioactive elements produced by
+ # neutron irradiation, and for his related discovery of nuclear reactions brought
+ # about by slow neutrons'
+
+ print(documents[0].meta)
+ # {'firstname': 'Enrico', 'surname': 'Fermi'}
+
+ print(documents[1].content)
+ # 'for their discoveries of growth factors'
+
+ print(documents[1].meta)
+ # {'firstname': 'Rita', 'surname': 'Levi-Montalcini'}
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/language-router-logging-6afed7b6b8a7ae78.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/language-router-logging-6afed7b6b8a7ae78.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf1815d659db5b500ccfae508b757bff5799f7af
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/language-router-logging-6afed7b6b8a7ae78.yaml
@@ -0,0 +1,12 @@
+---
+security:
+ - |
+ Remove the text value from a warning log in the `TextLanguageRouter` to avoid logging sensitive information.
+ The text can still be shown by switching to the `debug` log level.
+
+ ```python
+ import logging
+
+ logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING)
+ logging.getLogger("haystack").setLevel(logging.DEBUG)
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/llmevaluator-0ae63b2b9715fb9b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/llmevaluator-0ae63b2b9715fb9b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b68f16a33c5402346f44ef1ebfa44936780925a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/llmevaluator-0ae63b2b9715fb9b.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add a new LLMEvaluator component that leverages LLMs through the OpenAI API to evaluate pipelines.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/log-trace-correlation-78bcf418718b24b4.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/log-trace-correlation-78bcf418718b24b4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2d327dbbd8c0138644c043929317d2b614d3b2a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/log-trace-correlation-78bcf418718b24b4.yaml
@@ -0,0 +1,17 @@
+---
+enhancements:
+ - |
+ If using JSON logging in conjunction with tracing, Haystack will automatically add correlation IDs to the logs.
+ This is done by getting the necessary information from the current span and adding it to the log record.
+ You can customize this by overriding the `get_correlation_data_for_logs` method of your tracer's span:
+
+ ```python
+ from haystack.tracing import Span
+
+ class OpenTelemetrySpan(Span):
+ ...
+
+ def get_correlation_data_for_logs(self) -> Dict[str, Any]:
+ span_context = ...
+ return {"trace_id": span_context.trace_id, "span_id": span_context.span_id}
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/logging-tracer-e991a43ef44fe906.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/logging-tracer-e991a43ef44fe906.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c44382db93628fa337ae8ab6a699f6f4e9decdab
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/logging-tracer-e991a43ef44fe906.yaml
@@ -0,0 +1,20 @@
+---
+highlights: >
+ With the new Logging Tracer, users can inspect in the logs everything
+ that is happening in their Pipelines in real time. This feature aims to
+ improve the user experience during experimentation and prototyping.
+features:
+ - |
+ Introduce a Logging Tracer, that sends all traces to the logs.
+
+ It can be enabled as follows:
+ ```python
+ import logging
+ from haystack import tracing
+ from haystack.tracing.logging_tracer import LoggingTracer
+
+ logging.basicConfig(format="%(levelname)s - %(name)s - %(message)s", level=logging.WARNING)
+ logging.getLogger("haystack").setLevel(logging.DEBUG)
+ tracing.tracer.is_content_tracing_enabled = True # to enable tracing/logging content (inputs/outputs)
+ tracing.enable_tracing(LoggingTracer())
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/logging-tty-detection-8136769cb4d1da67.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/logging-tty-detection-8136769cb4d1da67.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8493fe964835b5c0a8d6357d429c8ebfe929dece
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/logging-tty-detection-8136769cb4d1da67.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ The `logging` module now detects if the standard output is a TTY. If it is not and `structlog` is installed, it
+ will automatically disable the console renderer and log in JSON format. This behavior can be overridden by setting
+ the environment variable `HAYSTACK_LOGGING_USE_JSON` to `false`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/make-from-dict-more-robust-for-conditionalrouter-c6ed6f841ad8e58d.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/make-from-dict-more-robust-for-conditionalrouter-c6ed6f841ad8e58d.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2cc64cc06448c14591bb406229fc3f8aca92f6c1
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/make-from-dict-more-robust-for-conditionalrouter-c6ed6f841ad8e58d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ The `from_dict` method of `ConditionalRouter` now correctly handles
+ the case where the `dict` passed to it contains the key `custom_filters` explicitly
+ set to `None`. Previously this was causing an `AttributeError`
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/max-loops-in-run-df9f5c068a723f71.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/max-loops-in-run-df9f5c068a723f71.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..00c6d3a5db2ee2f8d9cc5ca73d2d72dcb579db29
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/max-loops-in-run-df9f5c068a723f71.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Change `Pipeline.run()` to check if `max_loops_allowed` has been reached.
+ If we attempt to run a Component that has already run `max_loops_allowed` times, a `PipelineMaxLoops` exception will be raised.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/merge-pipeline-definitions-1da80e9803e2a8bb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/merge-pipeline-definitions-1da80e9803e2a8bb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..094562e8f7974d6f1c9aa30c8598ea099df6ea84
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/merge-pipeline-definitions-1da80e9803e2a8bb.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Merge `Pipeline`s definitions into a single `Pipeline` class.
+ The class in the `haystack.pipeline` package has been deleted and only `haystack.core.pipeline` exists now.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-bytestream-a29816c919c0be5a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-bytestream-a29816c919c0be5a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e116c73fbc381e7c01940fdc8f13fdd50d54143e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-bytestream-a29816c919c0be5a.yaml
@@ -0,0 +1,3 @@
+---
+enhancements:
+ - Add meta parameter to `ByteStream.from_file_path()` and `ByteStream.from_string()`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-filetyperouter-d3cf007f940ce324.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-filetyperouter-d3cf007f940ce324.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e67d0b7f0af323ddb617a735edb7d605ab72b38
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/meta-in-filetyperouter-d3cf007f940ce324.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ The `FileTypeRouter` now supports passing metadata (`meta`) in the `run` method.
+ When metadata is provided, the sources are internally converted to `ByteStream` objects and the metadata is added.
+ This new parameter simplifies working with preprocessing/indexing pipelines.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2be1fb87a4fd7363ea6ca8fc9095fadd4a49fe3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new variable called meta_value_type to the MetaFieldRanker that allows a user to parse the meta value into the data type specified as long as the meta value is a string.
+ The supported values for meta_value_type are '"float"', '"int"', '"date"', or 'None'. If None is passed then no parsing is done.
+ For example, if we specified meta_value_type="date" then for the meta value "date": "2015-02-01" we would parse the string into a datetime object.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/nltk-document-splitting-enhancement-6ef6f59bc277662c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/nltk-document-splitting-enhancement-6ef6f59bc277662c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d97027b30b97a854fc342d589409b97a8d0ed699
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/nltk-document-splitting-enhancement-6ef6f59bc277662c.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Introduced a new NLTK document splitting component, enhancing document preprocessing capabilities. This feature allows for fine-grained control over the splitting of documents into smaller parts based on configurable criteria such as word count, sentence boundaries, and page breaks. It supports multiple languages and offers options for handling sentence boundaries and abbreviations, facilitating better handling of various document types for further processing tasks.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/openai-ttft-42b1ad551b542930.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/openai-ttft-42b1ad551b542930.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc39b96f7f8c161a118e324bccc7d651e1be5087
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/openai-ttft-42b1ad551b542930.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add TTFT (Time-to-First-Token) support for OpenAI generators. This
+ captures the time taken to generate the first token from the model and
+ can be used to analyze the latency of the application.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/openapi-connector-auth-enhancement-a78e0666d3cf6353.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/openapi-connector-auth-enhancement-a78e0666d3cf6353.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8575bc14f9161694d43275a6e72beb3b947cee9c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/openapi-connector-auth-enhancement-a78e0666d3cf6353.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Enhanced the OpenAPIServiceConnector to support dynamic authentication handling. With this update, service credentials are now dynamically provided at each run invocation, eliminating the need for pre-configuring a known set of service authentications. This flexibility allows for the introduction of new services on-the-fly, each with its unique authentication, streamlining the integration process. This modification not only simplifies the initial setup of the OpenAPIServiceConnector but also ensures a more transparent and straightforward authentication process for each interaction with different OpenAPI services.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/openapi-service-connector-enhancements-21a2bc0a9aab8966.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/openapi-service-connector-enhancements-21a2bc0a9aab8966.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85d82ddfb7166df4b58c1e7b9a092a6a75c62997
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/openapi-service-connector-enhancements-21a2bc0a9aab8966.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Enhanced OpenAPI service connector to better handle method invocation with support for security schemes, refined handling of method arguments including URL/query parameters and request body, alongside improved error validation for method calls. This update enables more versatile interactions with OpenAPI services, ensuring compatibility with a wide range of API specifications.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/opentelemetry-tracer-33d44eb125a3145b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/opentelemetry-tracer-33d44eb125a3145b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa8a08040128fa89148b4b0d4b6a6f65922aef5f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/opentelemetry-tracer-33d44eb125a3145b.yaml
@@ -0,0 +1,39 @@
+---
+features:
+ - |
+ Added out-of-the-box support for the OpenTelemetry Tracer. This allows you to instrument pipeline and component
+ runs using OpenTelemetry and send traces to your preferred backend.
+
+ To use the OpenTelemetry Tracer you need to have the `opentelemetry-sdk` package installed in your environment.
+ To instruct Haystack to use the OpenTelemetry Tracer, you have multiple options:
+
+ * Run your Haystack application using the `opentelemetry-instrument` command line tool as described in the
+ [OpenTelemetry documentation](https://opentelemetry.io/docs/languages/python/automatic/#configuring-the-agent).
+ This behavior can be disabled by setting the `HAYSTACK_AUTO_TRACE_ENABLED` environment variable to `false`.
+ * Configure the tracer manually in your code using the `opentelemetry` package:
+ ```python
+ from opentelemetry import trace
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+ # Service name is required for most backends
+ resource = Resource(attributes={
+ SERVICE_NAME: "haystack"
+ })
+
+ traceProvider = TracerProvider(resource=resource)
+ processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
+ traceProvider.add_span_processor(processor)
+ trace.set_tracer_provider(traceProvider)
+
+ # Auto-configuration
+ import haystack.tracing
+ haystack.tracing.auto_enable_tracing()
+
+ # Or explicitly
+ from haystack.tracing import OpenTelemetryTracer
+
+ tracer = traceProvider.get_tracer("my_application")
+ tracing.enable_tracing(OpenTelemetryTracer(tracer))
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pin-structlog-39d991a70590dd6e.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pin-structlog-39d991a70590dd6e.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f633b3860808199212a58762e61a3235203fe36b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pin-structlog-39d991a70590dd6e.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Pins structlog to <= 24.2.0 to avoid some unit test failures. This is a temporary fix until we can upgrade tests to a newer version of structlog.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pin-typing-extensions-c4026f59603445b7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pin-typing-extensions-c4026f59603445b7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..88e3ddeae2913f59b9af0a390e6d136db8b3693f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pin-typing-extensions-c4026f59603445b7.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Pin the `typing-extensions` package to versions >= 4.7 to avoid
+ [incompatibilities with the `openai` package](https://community.openai.com/t/error-while-importing-openai-from-open-import-openai/578166/26).
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pip-test-dep-b797879f1d73b3eb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pip-test-dep-b797879f1d73b3eb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..25e874dbd9f9ff12d22c218d7e5ce50e799fa98f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pip-test-dep-b797879f1d73b3eb.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Add `pip` to test dependencies: `mypy` needs it to install missing stub packages.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pipe-disable-autoshow-dbbafd2bfdcce7a4.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pipe-disable-autoshow-dbbafd2bfdcce7a4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3acc6c69328becf8ac017a5f021a8d40ca4f93de
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pipe-disable-autoshow-dbbafd2bfdcce7a4.yaml
@@ -0,0 +1,7 @@
+---
+enhancements:
+ - |
+ In Jupyter notebooks, the image of the Pipeline will no longer be displayed automatically.
+ The textual representation of the Pipeline will be displayed.
+
+ To display the Pipeline image, use the `show` method of the Pipeline object.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pipe-draw-transparent-bg-2e0c8ff586f8e70c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pipe-draw-transparent-bg-2e0c8ff586f8e70c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4a831edb3895fe0be27d831df60e765d315cc17b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pipe-draw-transparent-bg-2e0c8ff586f8e70c.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Restore transparent background for images generated with Pipeline.draw and Pipeline.show
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-deserialization-callbacks-0642248725918684.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-deserialization-callbacks-0642248725918684.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68d1368750cb8566beb800cc79f9411deb97ea32
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-deserialization-callbacks-0642248725918684.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Add support for callbacks during pipeline deserialization. Currently supports a pre-init hook for components that can be used to inspect and modify the initialization parameters
+ before the invocation of the component's `__init__` method.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-run-rework-23a972d83b792db2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-run-rework-23a972d83b792db2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a1fc7d6c75758025f9eb3f4d7d6fcf5d5d882be
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-run-rework-23a972d83b792db2.yaml
@@ -0,0 +1,12 @@
+---
+highlights: >
+ `Pipeline.run()` internal logic has been heavily reworked to be more robust and reliable
+ than before.
+ This new implementation makes it easier to run `Pipeline`s that have cycles in their graph.
+ It also fixes some corner cases in `Pipeline`s that don't have any cycle.
+features:
+ - |
+ Fundamentally rework the internal logic of `Pipeline.run()`.
+ The rework makes it more reliable and covers more use cases.
+ We fixed some issues that made `Pipeline`s with cycles unpredictable
+ and with unclear Components execution order.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-same-input-ref-different-components-68d74cb17b35f8db.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-same-input-ref-different-components-68d74cb17b35f8db.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbb7301fb25401077841392e8ecda4a555c1c9cd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pipeline-same-input-ref-different-components-68d74cb17b35f8db.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Previously, when using the same input reference in different components, the Pipeline run logic had an
+ unexpected behavior. This has been fixed by deepcopying the inputs before passing them to the components.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pptx-import-edc6b8d6274e4023.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pptx-import-edc6b8d6274e4023.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..51458eb6fdfed651829fa870234636fe8abeeb74
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pptx-import-edc6b8d6274e4023.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Correctly expose `PPTXToDocument` component in `haystack` namespace.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/prompt-builder-default-value-95383dd6d17a05d5.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/prompt-builder-default-value-95383dd6d17a05d5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6f70c74648befeee31c3100e2169c745fc69659
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/prompt-builder-default-value-95383dd6d17a05d5.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fix `PromptBuilder` missing input default values.
+ These missing default values were causing the PromptBuilder to never run if certain inputs are not received.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-refactoring-de869c91b42ce5b6.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-refactoring-de869c91b42ce5b6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..464528b64a5dc564310fdb9d86231d1f47b8e929
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-refactoring-de869c91b42ce5b6.yaml
@@ -0,0 +1,12 @@
+---
+enhancements:
+ - |
+ Refactor `PyPDFToDocument` to simplify support for custom PDF converters.
+ PDF converters are classes that implement the `PyPDFConverter` protocol and have 3 methods:
+ `convert`, `to_dict` and `from_dict`.
+ The `DefaultConverter` class is provided as a default implementation.
+deprecations:
+ - |
+ Using the `converter_name` parameter in the `PyPDFToDocument` component is deprecated.
+ It will be removed in the 2.3.0 release.
+ Use the `converter` parameter instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-f33c29830508ee01.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-f33c29830508ee01.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba923793778b64341d476f4b9a44f2c71e17b405
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-f33c29830508ee01.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ The `DefaultConverter` class used by the `PyPDFToDocument` component has been deprecated. Its functionality will be merged into the component in 2.7.0.
+fixes:
+ - |
+ Fix the serialization of `PyPDFToDocument` component to prevent the default converter from being serialized unnecessarily.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-revert-22fcdad91951d3c2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-revert-22fcdad91951d3c2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f087b7ae564d2dc404460d56f281b5fe3bae25b3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/pypdf-serde-fixes-revert-22fcdad91951d3c2.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Revert change to PyPDFConverter that broke the deserialization of pre 2.6.0 YAMLs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/python-3.11-3.12-857850f74f2e1ceb.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/python-3.11-3.12-857850f74f2e1ceb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2f9207f8ce88644038cf1f32ea061a7fd0f09694
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/python-3.11-3.12-857850f74f2e1ceb.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Document Python 3.11 and 3.12 support in project configuration.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/ranker-add-batching-during-inference-f077411ec389a63b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/ranker-add-batching-during-inference-f077411ec389a63b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24e8191ac68762f365cd7d63019dcb6fb8e0bd18
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/ranker-add-batching-during-inference-f077411ec389a63b.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ We added batching during inference time to the TransformerSimilarityRanker to help prevent OOMs when ranking large amounts of Documents.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..128b13e95dabcb0b0efeb71a115db7e2c24c7dd2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `DocumentRecallEvaluator`, a Component that can be used to calculate the Recall single-hit or multi-hit
+ metric given a list of questions, a list of expected documents for each question and the list of predicted
+ documents for each question.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/refactor-document-store-namespace-33f4433a2b121efc.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/refactor-document-store-namespace-33f4433a2b121efc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e003254d764f27e51a1d74dbe7aa6ef417e217a8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/refactor-document-store-namespace-33f4433a2b121efc.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ Change the imports for in_memory document store and retrievers from:
+
+ from haystack.document_stores import InMemoryDocumentStore
+ from haystack.components.retrievers import InMemoryEmbeddingRetriever
+
+ to:
+
+ from haystack.document_stores.in_memory import InMemoryDocumentStore
+ from haystack.components.retrievers.in_memory import InMemoryEmbeddingRetriever
+
+enhancements:
+ - |
+ Stop exposing `in_memory` package symbols in the `haystack.document_store` and
+ `haystack.components.retrievers` root namespaces.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/refactor_deserialize_document_store_in_init_parameters-b7f05173f0c56452.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/refactor_deserialize_document_store_in_init_parameters-b7f05173f0c56452.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8186a6f6027c3cea57d3ed047d1788b6dc8261af
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/refactor_deserialize_document_store_in_init_parameters-b7f05173f0c56452.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Refactor deserialize_document_store_in_init_parameters so that new function name
+ indicates that the operation occurs in place, with no return value.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/release-note-2b603a123cd36214.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/release-note-2b603a123cd36214.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91db94a58aa5acf2c1887d253e0ba50d06f3a664
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/release-note-2b603a123cd36214.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add `truncate_dim` parameter to Sentence Transformers Embedders, which allows truncating
+ embeddings. Especially useful for models trained with Matryoshka Representation Learning.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/release-note-42273d88ce3e2b2e.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/release-note-42273d88ce3e2b2e.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..094246c604241600f4408dc0a5427d074c4c043e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/release-note-42273d88ce3e2b2e.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add `precision` parameter to Sentence Transformers Embedders, which allows quantized
+ embeddings. Especially useful for reducing the size of the embeddings of a corpus for semantic search, among other tasks.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-debug-path-68e9b2e41d7d43fd.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-debug-path-68e9b2e41d7d43fd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..046a4ece02da289399273f90573ef1828c107cb4
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-debug-path-68e9b2e41d7d43fd.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Removed `Pipeline` init argument `debug_path`. We do not support this anymore.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-multiplexer-1f948f1f9b811195.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-multiplexer-1f948f1f9b811195.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..87f92d21c05c3da489090134b39f51c2ade9bb18
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-multiplexer-1f948f1f9b811195.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ `Multiplexer` is removed and users should switch to `BranchJoiner` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-param-from-pipeline-6b05ce1ff1f7fdec.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-param-from-pipeline-6b05ce1ff1f7fdec.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..783382cb8407e68b5e3a5468de2c3e3150b1e8cc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-param-from-pipeline-6b05ce1ff1f7fdec.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Removed `Pipeline` init argument `max_loops_allowed`. Use `max_runs_per_component` instead.
+ - |
+ Removed `PipelineMaxLoops` exception. Use `PipelineMaxComponentRuns` instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-pipeline-run-debug-eab0c31ea0ce513d.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-pipeline-run-debug-eab0c31ea0ce513d.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45704def4cb19c37692ad10f3f6357703a6cb566
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-pipeline-run-debug-eab0c31ea0ce513d.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Remove unused `debug` parameter from `Pipeline.run` method.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-sentence-window-retrieval-376f1fc000c9fa88.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-sentence-window-retrieval-376f1fc000c9fa88.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8fee636eb3f6cc204afdcb81a98222aec2579b76
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-deprecated-sentence-window-retrieval-376f1fc000c9fa88.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Removing deprecated `SentenceWindowRetrieval`, replaced by `SentenceWindowRetriever`
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-gptgenerator-8eced280d3b720d3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-gptgenerator-8eced280d3b720d3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..694db015eb86fba773edaac486d1692405600d81
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-gptgenerator-8eced280d3b720d3.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Removed the deprecated GPTGenerator and GPTChatGenerator components. Use OpenAIGenerator and OpenAIChatGenerator instead.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-htmltodocument-deprecated-init-params-2f81cf6a3b13710d.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-htmltodocument-deprecated-init-params-2f81cf6a3b13710d.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fa65cf17ecdcecc74301eabde7e0dec22f7b1fac
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-htmltodocument-deprecated-init-params-2f81cf6a3b13710d.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Removed deprecated init parameters `extractor_type` and `try_others` from `HTMLToDocument`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-legacy-filters-ff89cd0a00a64ce9.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-legacy-filters-ff89cd0a00a64ce9.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..da33cd069d77c02c1c9d9742273fa77a77aa2bef
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-legacy-filters-ff89cd0a00a64ce9.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ The legacy filter syntax support has been completely removed. Users need to use the new filter syntax. See the [docs](https://docs.haystack.deepset.ai/docs/metadata-filtering) for more details.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-serialization-from-telemetry-a054f7f26f277d2b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-serialization-from-telemetry-a054f7f26f277d2b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82962d877d3dc8ea31a95019a866655231a0480c
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-serialization-from-telemetry-a054f7f26f277d2b.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fix telemetry code that could cause a ValueError by trying to serialize a pipeline.
+ Telemetry code does not serialize pipelines anymore.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/remove-support-to-connect-component-to-self-6eedfb287f2a2a02.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/remove-support-to-connect-component-to-self-6eedfb287f2a2a02.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18792c2d19ccbaec8da2724ee200fd2ebf93217d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/remove-support-to-connect-component-to-self-6eedfb287f2a2a02.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ `Pipeline.connect()` will now raise a `PipelineConnectError` if `sender` and `receiver` are the same Component.
+ We do not support this use case anymore.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/rename-connect-arguments-2d99d9d5cbe9ab4c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/rename-connect-arguments-2d99d9d5cbe9ab4c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d242c5b6cb04e63b4dbd5445dc7ec4e5fbf79e0
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/rename-connect-arguments-2d99d9d5cbe9ab4c.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+ `Pipeline.connect()` arguments have been renamed for clarity. This is a breaking change if `connect` was called
+ with keyword arguments only.
+ `connect_from` and `connect_to` arguments have been renamed respectively to `sender` and `receiver`.
+ The behaviour of `Pipeline.connect()` is not changed.
+features:
+ - |
+ Rename `Pipeline.connect()` arguments for clarity
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/rename-sentence-window-retrieval-be4cd6e1d18ef10e.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/rename-sentence-window-retrieval-be4cd6e1d18ef10e.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..098f6a58c992bd271c726307e60b6574f88e1809
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/rename-sentence-window-retrieval-be4cd6e1d18ef10e.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ `SentenceWindowRetrieval` component has been renamed to `SentenceWindowRetriever`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ccba3395291faf07c2c32a49d86c9e370fd822cd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml
@@ -0,0 +1,7 @@
+---
+# upgrade: (intentionally empty — no upgrade impact for this change)
+enhancements:
+ - |
+ The default in `default_streaming_callback` was confusing, this function was the go-to-helper
+ one would use to quickly print the generated tokens as they come, but it was not used by default.
+ The function was then renamed to `print_streaming_chunk`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/retries-and-timeout-for-AzureOpenAIDocumentEmbedder-006fd84204942e43.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/retries-and-timeout-for-AzureOpenAIDocumentEmbedder-006fd84204942e43.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a4687dcbbb9c57ff210156c0466b2ad1b8e798c0
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/retries-and-timeout-for-AzureOpenAIDocumentEmbedder-006fd84204942e43.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Add `max_retries`, `timeout` parameters to the `AzureOpenAIDocumentEmbedder` initialization.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/rm-serialize-callback-handler-5c104eafc6673932.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/rm-serialize-callback-handler-5c104eafc6673932.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d830c014b800eda1dd354161fcb8c61b9d483b5a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/rm-serialize-callback-handler-5c104eafc6673932.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The `serialize_callback_handler` and `deserialize_callback_handler` utility functions have been removed.
+ Use `serialize_callable` and `deserialize_callable` instead.
+
+ For more information on `serialize_callable` and `deserialize_callable`, see the API reference:
+ https://docs.haystack.deepset.ai/reference/utils-api#module-callable_serialization
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/run-greedy-fix-6d4559126e7739ce.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/run-greedy-fix-6d4559126e7739ce.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d597121f7c91f986b49a47c4cf1e218fa2e3c6fe
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/run-greedy-fix-6d4559126e7739ce.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix `Pipeline.run()` mistakenly running a Component before it should.
+ This can happen when a greedy variadic Component must be executed before a
+ Component with default inputs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sas-evaluator-7858ea6c38f80bc7.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sas-evaluator-7858ea6c38f80bc7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7447437d89754553cc1879511a8a8bf90cb860b3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sas-evaluator-7858ea6c38f80bc7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add `SASEvaluator`, this Component can be used to calculate the Semantic Answer Similarity of answers returned by LLMs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0548d9d269f15b394fc1e62725fa7f1bc858ea38
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
@@ -0,0 +1,40 @@
+---
+features:
+ - |
+ Expose a `Secret` type to provide consistent API for any component that requires secrets for authentication.
+ Currently supports string tokens and environment variables. Token-based secrets are automatically
+ prevented from being serialized to disk (to prevent accidental leakage of secrets).
+ ```python
+ @component
+ class MyComponent:
+ def __init__(self, api_key: Optional[Secret] = None, **kwargs):
+ self.api_key = api_key
+ self.backend = None
+
+ def warm_up(self):
+ # Call resolve_value to yield a single result. The semantics of the result is policy-dependent.
+ # Currently, all supported policies will return a single string token.
+ self.backend = SomeBackend(api_key=self.api_key.resolve_value() if self.api_key else None, ...)
+
+ def to_dict(self):
+ # Serialize the policy like any other (custom) data. If the policy is token-based, it will
+ # raise an error.
+ return default_to_dict(self, api_key=self.api_key.to_dict() if self.api_key else None, ...)
+
+ @classmethod
+ def from_dict(cls, data):
+ # Deserialize the policy data before passing it to the generic from_dict function.
+ api_key_data = data["init_parameters"]["api_key"]
+ api_key = Secret.from_dict(api_key_data) if api_key_data is not None else None
+ data["init_parameters"]["api_key"] = api_key
+ return default_from_dict(cls, data)
+
+ # No authentication.
+ component = MyComponent(api_key=None)
+ # Token based authentication
+ component = MyComponent(api_key=Secret.from_token("sk-randomAPIkeyasdsa32ekasd32e"))
+ component.to_dict() # Error! Can't serialize authentication tokens
+ # Environment variable based authentication
+ component = MyComponent(api_key=Secret.from_env("OPENAI_API_KEY"))
+ component.to_dict() # This is fine
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformer-doc-embedder-config_kwargs-d7d254c6b94887c4.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformer-doc-embedder-config_kwargs-d7d254c6b94887c4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8693b75a5083cbd9f082649c56ee5fb6ed411ebd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformer-doc-embedder-config_kwargs-d7d254c6b94887c4.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ SentenceTransformersDocumentEmbedder now supports config_kwargs for additional parameters when loading the model configuration
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-embedders-new-devices-mgmt-07cb59c6b4a13280.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-embedders-new-devices-mgmt-07cb59c6b4a13280.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19a8223a5f7d12f77f4c2aeb809b2295242e01b2
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-embedders-new-devices-mgmt-07cb59c6b4a13280.yaml
@@ -0,0 +1,20 @@
+---
+upgrade:
+ - |
+ Adopt the new framework-agnostic device management in Sentence Transformers Embedders.
+
+ Before this change:
+ ```python
+ from haystack.components.embedders import SentenceTransformersTextEmbedder
+ embedder = SentenceTransformersTextEmbedder(device="cuda:0")
+ ```
+
+ After this change:
+ ```python
+ from haystack.utils.device import ComponentDevice, Device
+ from haystack.components.embedders import SentenceTransformersTextEmbedder
+ device = ComponentDevice.from_single(Device.gpu(id=0))
+ # or
+ # device = ComponentDevice.from_str("cuda:0")
+ embedder = SentenceTransformersTextEmbedder(device=device)
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-text-embedder-config_kwargs-11f10429e25a3a6e.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-text-embedder-config_kwargs-11f10429e25a3a6e.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9409b09e6d51f489db378e71c14a2ddd213b5a2d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-transformers-text-embedder-config_kwargs-11f10429e25a3a6e.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ SentenceTransformersTextEmbedder now supports config_kwargs for additional parameters when loading the model configuration
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sentence-window-retriever-output-docs-d3de2ac4328488f1.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-window-retriever-output-docs-d3de2ac4328488f1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7e86f75ecb1e763802d2f23f9318d5e0a63c9c42
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sentence-window-retriever-output-docs-d3de2ac4328488f1.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ The SentenceWindowRetriever now has an extra output key containing all the documents belonging to the context window.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/single-meta-in-tikaconverter-89b454c451a2ed93.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/single-meta-in-tikaconverter-89b454c451a2ed93.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fcd1671ad5368b8f7d9ea095e0697e50da44fa6d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/single-meta-in-tikaconverter-89b454c451a2ed93.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds support for single metadata dictionary input in `TikaDocumentConverter`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..455647b550e6063a8e258aedef7a28b401726402
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Introduce a new `SparseEmbedding` class which can be used to store a sparse
+ vector representation of a Document.
+ It will be instrumental to support Sparse Embedding Retrieval with
+ the subsequent introduction of Sparse Embedders and Sparse Embedding Retrievers.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/statistical-evaluator-d65b80e3ac24778a.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/statistical-evaluator-d65b80e3ac24778a.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7da31e250e77256d166859439438c79b83606188
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/statistical-evaluator-d65b80e3ac24778a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add `StatisticalEvaluator`, this Component can be used to calculate the different statistic metrics from answers returned by LLMs.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/structured-logging-2d6cef3fee2b4f0e.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/structured-logging-2d6cef3fee2b4f0e.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..583dea24778ca37cd85aa28b789ecaf93972644b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/structured-logging-2d6cef3fee2b4f0e.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ Haystack now supports structured logging out-of-the box.
+ Logging can be separated into 3 categories:
+ - If [`structlog`](https://www.structlog.org/en/stable/) is not installed, Haystack will
+ use the standard Python logging library with whatever configuration is present.
+ - If `structlog` is installed, Haystack will log through [`structlog`](https://www.structlog.org/en/stable/) using
+ structlog's console renderer.
+ To disable structlog, set the environment variable `HAYSTACK_LOGGING_IGNORE_STRUCTLOG_ENV_VAR` to `true`.
+ - To log in JSON, install [`structlog`](https://www.structlog.org/en/stable/) and
+ - set the environment variable `HAYSTACK_LOGGING_JSON` to `true` or
+ - enable JSON logging from Python
+ ```python
+ import haystack.logging
+
+ haystack.logging.configure_logging(use_json=True)
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/support-ChatMessage-in-AnswerBuilder-d7958a72d3786f08.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/support-ChatMessage-in-AnswerBuilder-d7958a72d3786f08.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eb3ddfd3ddd6ab0a51b8034153504afd8be7e732
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/support-ChatMessage-in-AnswerBuilder-d7958a72d3786f08.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ AnswerBuilder can now accept ChatMessages as input in addition to strings. When using ChatMessages, metadata will be automatically added to the answer.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/timeout-and-retries-for-AzureOpenAITextEmbedder-b02b760e9d6f28aa.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/timeout-and-retries-for-AzureOpenAITextEmbedder-b02b760e9d6f28aa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f30a4738cddce63de24b37b8fbc78955d6db705
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/timeout-and-retries-for-AzureOpenAITextEmbedder-b02b760e9d6f28aa.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Add `max_retries` and `timeout` parameters to the AzureOpenAITextEmbedder initializations.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/transformer-router-from-dict-fix-04cb41b38ca61043.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/transformer-router-from-dict-fix-04cb41b38ca61043.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cd7d5ce6203f68e45c688e54600cfb2dbb52e82
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/transformer-router-from-dict-fix-04cb41b38ca61043.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix `TransformersZeroShotTextRouter` and `TransformersTextRouter` `from_dict` methods to work when `init_parameters` only contain required variables.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/unpin-numpy-84575284e844628b.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/unpin-numpy-84575284e844628b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9c7240cdd0e3610dd1faed91610ad54a5ad30c47
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/unpin-numpy-84575284e844628b.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Previously, numpy was pinned to <2.0 to avoid compatibility issues in several core integrations.
+ This pin has been removed, and haystack can work with both numpy 1.x and 2.x.
+ If necessary, we will pin numpy version in specific core integrations that require it.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/unsafe-behaviour-e8b41d957113e0c3.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/unsafe-behaviour-e8b41d957113e0c3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6242432e3134a734063b00d3ebcb17d772c898b
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/unsafe-behaviour-e8b41d957113e0c3.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Add `unsafe` argument to enable behaviour that could lead to remote code execution in `ConditionalRouter` and `OutputAdapter`.
+ By default unsafe behaviour is not enabled, the user must set it explicitly to `True`.
+ This means that user types like `ChatMessage`, `Document`, and `Answer` can be used as output types when `unsafe` is `True`.
+ We recommend using `unsafe` behaviour only when the Jinja templates source is trusted.
+ For more info see the documentation for [`ConditionalRouter`](https://docs.haystack.deepset.ai/docs/conditionalrouter#unsafe-behaviour) and [`OutputAdapter`](https://docs.haystack.deepset.ai/docs/outputadapter#unsafe-behaviour)
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/update-max-seq-lenght-st-1dc3d7a9c9a3bdcd.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/update-max-seq-lenght-st-1dc3d7a9c9a3bdcd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a5595ee6cfbc73c86710024bc5aa4e1307d5667
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/update-max-seq-lenght-st-1dc3d7a9c9a3bdcd.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Updates SentenceTransformersDocumentEmbedder and SentenceTransformersTextEmbedder so model_max_length passed through tokenizer_kwargs also updates the max_seq_length of the underlying SentenceTransformer model.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/update-searchapi-new-format-74d8794a8a6f5581.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/update-searchapi-new-format-74d8794a8a6f5581.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c6a40d72cbbdc7d634c02f0298a1e59bb4eaabc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/update-searchapi-new-format-74d8794a8a6f5581.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Updated the SearchApiWebSearch component with new search format and allowed users to specify the search engine via the `engine`
+ parameter in `search_params`. The default search engine is Google, making it easier for users to tailor their web searches.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-handling-in-components-925d4f3c3c9530db.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-handling-in-components-925d4f3c3c9530db.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e91c895264a450f6e8252288367705aab893f1a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-handling-in-components-925d4f3c3c9530db.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Update secret handling for components using the `Secret` type. The following components are affected:
+ `RemoteWhisperTranscriber`, `AzureOCRDocumentConverter`, `AzureOpenAIDocumentEmbedder`, `AzureOpenAITextEmbedder`, `HuggingFaceTEIDocumentEmbedder`, `HuggingFaceTEITextEmbedder`, `OpenAIDocumentEmbedder`, `SentenceTransformersDocumentEmbedder`, `SentenceTransformersTextEmbedder`, `AzureOpenAIGenerator`, `AzureOpenAIChatGenerator`, `HuggingFaceLocalChatGenerator`, `HuggingFaceTGIChatGenerator`, `OpenAIChatGenerator`, `HuggingFaceLocalGenerator`, `HuggingFaceTGIGenerator`, `OpenAIGenerator`, `TransformersSimilarityRanker`, `SearchApiWebSearch`, `SerperDevWebSearch`
+
+ The default init parameters for `api_key`, `token`, `azure_ad_token` have been adjusted to use environment variables wherever possible. The `azure_ad_token_provider` parameter has been removed from Azure-based components. Components based on Hugging
+ Face are now required to either use a token or an environment variable if authentication is required - The on-disk local token file is no longer supported.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-management-extractive-reader-4a8ce8df1f2705f8.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-management-extractive-reader-4a8ce8df1f2705f8.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3d06cd88d934e94d8e4a46ae5293376ec1f7c76
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/update-secret-management-extractive-reader-4a8ce8df1f2705f8.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Update secret handling for the `ExtractiveReader` component using the `Secret` type.
+
+ The default init parameter `token` is now required to either use a token or the `HF_API_TOKEN` environment variable
+ if authentication is required - The on-disk local token file is no longer supported.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/update-sentence-transformers-3-3bca4e3f3ad3e7ba.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/update-sentence-transformers-3-3bca4e3f3ad3e7ba.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c86d952f9362b5b16b13e086ebe00bed88dba000
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/update-sentence-transformers-3-3bca4e3f3ad3e7ba.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Starting from `haystack-ai==2.4.0`, Haystack is compatible with `sentence-transformers>=3.0.0`;
+ earlier versions of `sentence-transformers` are not supported.
+ We are updating the test dependency and the LazyImport messages to reflect that.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/updated-ranking-mode-in-meta-field-ranker-d1f1304cc422b202.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/updated-ranking-mode-in-meta-field-ranker-d1f1304cc422b202.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..965110ae1de49ed74273525f650bb08f040e5b23
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/updated-ranking-mode-in-meta-field-ranker-d1f1304cc422b202.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixed a bug in the `MetaFieldRanker`: when the `ranking_mode` parameter was overridden in the `run` method,
+ the component was incorrectly using the `ranking_mode` parameter set in the `__init__` method.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-hatch-uv-5e5cccdf7450d01f.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-hatch-uv-5e5cccdf7450d01f.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a6a63d35ce9b751023c37fc5936159015cb03f38
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-hatch-uv-5e5cccdf7450d01f.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Upgrade Hatch to 1.13.0 and adopt uv as installer, to speed up the CI.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-tgi-to-new-hf-hub-51fcb3b6122fe020.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-tgi-to-new-hf-hub-51fcb3b6122fe020.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82dba924fb8ceddd26aab6112aed00f360986b6f
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/upgrade-tgi-to-new-hf-hub-51fcb3b6122fe020.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+ The `HuggingFaceTGIGenerator` and `HuggingFaceTGIChatGenerator` components have been modified to be compatible with
+ `huggingface_hub>=0.22.0`.
+
+ If you use these components, you may need to upgrade the `huggingface_hub` library.
+ To do this, run the following command in your environment:
+ ```bash
+ pip install "huggingface_hub>=0.22.0"
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/use-document-store-from-dict-db7975d0e0e5e451.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/use-document-store-from-dict-db7975d0e0e5e451.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89563147f82f93958a69630c10b53f864a02b0fe
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/use-document-store-from-dict-db7975d0e0e5e451.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ For components that support multiple Document Stores, prioritize using the specific `from_dict` class method
+ for deserialization when available. Otherwise, fall back to the generic `default_from_dict` method.
+ This impacts the following generic components: `CacheChecker`, `DocumentWriter`, `FilterRetriever`, and
+ `SentenceWindowRetriever`.
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/whisper-loc-new-devices-0665a24cd92ee4b6.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/whisper-loc-new-devices-0665a24cd92ee4b6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..709529ced188604312ee2f05b31b1c48838872f3
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/whisper-loc-new-devices-0665a24cd92ee4b6.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ Adopt the new framework-agnostic device management in Local Whisper Transcriber.
+
+ Before this change:
+ ```python
+ from haystack.components.audio import LocalWhisperTranscriber
+
+ transcriber = LocalWhisperTranscriber(device="cuda:0")
+ ```
+
+ After this change:
+ ```python
+ from haystack.utils.device import ComponentDevice, Device
+ from haystack.components.audio import LocalWhisperTranscriber
+
+ device = ComponentDevice.from_single(Device.gpu(id=0))
+ # or
+ # device = ComponentDevice.from_str("cuda:0")
+
+ transcriber = LocalWhisperTranscriber(device=device)
+ ```
diff --git a/testbed/deepset-ai__haystack/releasenotes/notes/zero-shot-text-router-f5090589e652197c.yaml b/testbed/deepset-ai__haystack/releasenotes/notes/zero-shot-text-router-f5090589e652197c.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20c158e01e9244ea5d322ed13c8b112ef04dda87
--- /dev/null
+++ b/testbed/deepset-ai__haystack/releasenotes/notes/zero-shot-text-router-f5090589e652197c.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add a Zero Shot Text Router that uses an NLI model from HF to classify texts based on a set of provided labels and routes them based on the label they were classified with.
diff --git a/testbed/deepset-ai__haystack/test/__init__.py b/testbed/deepset-ai__haystack/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1764a6e039233b694403c434fa97c13e847f6ba
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/testbed/deepset-ai__haystack/test/components/audio/__init__.py b/testbed/deepset-ai__haystack/test/components/audio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1764a6e039233b694403c434fa97c13e847f6ba
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/audio/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/testbed/deepset-ai__haystack/test/components/audio/test_whisper_remote.py b/testbed/deepset-ai__haystack/test/components/audio/test_whisper_remote.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f18018e87799202ffc740590463333614fb2fbd
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/audio/test_whisper_remote.py
@@ -0,0 +1,212 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+import os
+import pytest
+
+from haystack import Pipeline
+from haystack.components.audio.whisper_remote import RemoteWhisperTranscriber
+from haystack.components.fetchers import LinkContentFetcher
+from haystack.dataclasses import ByteStream
+from haystack.utils import Secret
+
+
+class TestRemoteWhisperTranscriber:
+ def test_init_no_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ RemoteWhisperTranscriber()
+
+ def test_init_key_env_var(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+ t = RemoteWhisperTranscriber()
+ assert t.client.api_key == "test_api_key"
+
+ def test_init_key_module_env_and_global_var(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key_2")
+ t = RemoteWhisperTranscriber()
+ assert t.client.api_key == "test_api_key_2"
+
+ def test_init_default(self):
+ transcriber = RemoteWhisperTranscriber(api_key=Secret.from_token("test_api_key"))
+ assert transcriber.client.api_key == "test_api_key"
+ assert transcriber.model == "whisper-1"
+ assert transcriber.organization is None
+ assert transcriber.whisper_params == {"response_format": "json"}
+
+ def test_init_custom_parameters(self):
+ transcriber = RemoteWhisperTranscriber(
+ api_key=Secret.from_token("test_api_key"),
+ model="whisper-1",
+ organization="test-org",
+ api_base_url="test_api_url",
+ language="en",
+ prompt="test-prompt",
+ response_format="json",
+ temperature="0.5",
+ )
+
+ assert transcriber.model == "whisper-1"
+ assert transcriber.api_key == Secret.from_token("test_api_key")
+ assert transcriber.organization == "test-org"
+ assert transcriber.api_base_url == "test_api_url"
+ assert transcriber.whisper_params == {
+ "language": "en",
+ "prompt": "test-prompt",
+ "response_format": "json",
+ "temperature": "0.5",
+ }
+
+ def test_to_dict_default_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+ transcriber = RemoteWhisperTranscriber()
+ data = transcriber.to_dict()
+ assert data == {
+ "type": "haystack.components.audio.whisper_remote.RemoteWhisperTranscriber",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "model": "whisper-1",
+ "api_base_url": None,
+ "organization": None,
+ "response_format": "json",
+ },
+ }
+
+ def test_to_dict_with_custom_init_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+ transcriber = RemoteWhisperTranscriber(
+ api_key=Secret.from_env_var("ENV_VAR", strict=False),
+ model="whisper-1",
+ organization="test-org",
+ api_base_url="test_api_url",
+ language="en",
+ prompt="test-prompt",
+ response_format="json",
+ temperature="0.5",
+ )
+ data = transcriber.to_dict()
+ assert data == {
+ "type": "haystack.components.audio.whisper_remote.RemoteWhisperTranscriber",
+ "init_parameters": {
+ "api_key": {"env_vars": ["ENV_VAR"], "strict": False, "type": "env_var"},
+ "model": "whisper-1",
+ "organization": "test-org",
+ "api_base_url": "test_api_url",
+ "language": "en",
+ "prompt": "test-prompt",
+ "response_format": "json",
+ "temperature": "0.5",
+ },
+ }
+
+ def test_from_dict_with_default_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+
+ data = {
+ "type": "haystack.components.audio.whisper_remote.RemoteWhisperTranscriber",
+ "init_parameters": {
+ "model": "whisper-1",
+ "api_base_url": "https://api.openai.com/v1",
+ "organization": None,
+ "response_format": "json",
+ },
+ }
+
+ transcriber = RemoteWhisperTranscriber.from_dict(data)
+
+ assert transcriber.model == "whisper-1"
+ assert transcriber.organization is None
+ assert transcriber.api_base_url == "https://api.openai.com/v1"
+ assert transcriber.whisper_params == {"response_format": "json"}
+
+ def test_from_dict_with_custom_init_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+
+ data = {
+ "type": "haystack.components.audio.whisper_remote.RemoteWhisperTranscriber",
+ "init_parameters": {
+ "model": "whisper-1",
+ "organization": "test-org",
+ "api_base_url": "test_api_url",
+ "language": "en",
+ "prompt": "test-prompt",
+ "response_format": "json",
+ "temperature": "0.5",
+ },
+ }
+ transcriber = RemoteWhisperTranscriber.from_dict(data)
+
+ assert transcriber.model == "whisper-1"
+ assert transcriber.organization == "test-org"
+ assert transcriber.api_base_url == "test_api_url"
+ assert transcriber.whisper_params == {
+ "language": "en",
+ "prompt": "test-prompt",
+ "response_format": "json",
+ "temperature": "0.5",
+ }
+
+ def test_from_dict_with_default_parameters_no_env_var(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+
+ data = {
+ "type": "haystack.components.audio.whisper_remote.RemoteWhisperTranscriber",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "model": "whisper-1",
+ "api_base_url": "https://api.openai.com/v1",
+ "organization": None,
+ "response_format": "json",
+ },
+ }
+
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ RemoteWhisperTranscriber.from_dict(data)
+
+ @pytest.mark.skipif(
+ not os.environ.get("OPENAI_API_KEY", None),
+ reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
+ )
+ @pytest.mark.integration
+ def test_whisper_remote_transcriber(self, test_files_path):
+ transcriber = RemoteWhisperTranscriber()
+
+ paths = [
+ test_files_path / "audio" / "this is the content of the document.wav",
+ str(test_files_path / "audio" / "the context for this answer is here.wav"),
+ ByteStream.from_file_path(test_files_path / "audio" / "answer.wav"),
+ ]
+
+ output = transcriber.run(sources=paths)
+
+ docs = output["documents"]
+ assert len(docs) == 3
+ assert docs[0].content.strip().lower() == "this is the content of the document."
+ assert test_files_path / "audio" / "this is the content of the document.wav" == docs[0].meta["file_path"]
+
+ assert docs[1].content.strip().lower() == "the context for this answer is here."
+ assert str(test_files_path / "audio" / "the context for this answer is here.wav") == docs[1].meta["file_path"]
+
+ assert docs[2].content.strip().lower() == "answer."
+
+ @pytest.mark.skipif(
+ not os.environ.get("OPENAI_API_KEY", None),
+ reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
+ )
+ @pytest.mark.integration
+ def test_whisper_remote_transcriber_pipeline_and_url_source(self):
+ pipe = Pipeline()
+ pipe.add_component("fetcher", LinkContentFetcher())
+ pipe.add_component("transcriber", RemoteWhisperTranscriber())
+
+ pipe.connect("fetcher", "transcriber")
+ result = pipe.run(
+ data={
+ "fetcher": {
+ "urls": [
+ "https://github.com/deepset-ai/haystack/raw/refs/heads/main/test/test_files/audio/MLK_Something_happening.mp3"
+ ] # noqa: E501
+ }
+ }
+ )
+ assert "masses of people" in result["transcriber"]["documents"][0].content
diff --git a/testbed/deepset-ai__haystack/test/components/classifiers/test_zero_shot_document_classifier.py b/testbed/deepset-ai__haystack/test/components/classifiers/test_zero_shot_document_classifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d679e3d21d523256739d5a3f5d41534d40a433a
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/classifiers/test_zero_shot_document_classifier.py
@@ -0,0 +1,165 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+
+from unittest.mock import patch
+
+from haystack import Document, Pipeline
+from haystack.components.classifiers import TransformersZeroShotDocumentClassifier
+from haystack.components.retrievers import InMemoryBM25Retriever
+from haystack.document_stores.in_memory import InMemoryDocumentStore
+from haystack.utils import ComponentDevice, Secret
+
+
+class TestTransformersZeroShotDocumentClassifier:
+ def test_init(self):
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ assert component.labels == ["positive", "negative"]
+ assert component.token == Secret.from_env_var(["HF_API_TOKEN", "HF_TOKEN"], strict=False)
+ assert component.multi_label is False
+ assert component.pipeline is None
+ assert component.classification_field is None
+
+ def test_to_dict(self):
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ component_dict = component.to_dict()
+ assert component_dict == {
+ "type": "haystack.components.classifiers.zero_shot_document_classifier.TransformersZeroShotDocumentClassifier",
+ "init_parameters": {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "labels": ["positive", "negative"],
+ "token": {"env_vars": ["HF_API_TOKEN", "HF_TOKEN"], "strict": False, "type": "env_var"},
+ "huggingface_pipeline_kwargs": {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "device": ComponentDevice.resolve_device(None).to_hf(),
+ "task": "zero-shot-classification",
+ },
+ },
+ }
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.delenv("HF_API_TOKEN", raising=False)
+ data = {
+ "type": "haystack.components.classifiers.zero_shot_document_classifier.TransformersZeroShotDocumentClassifier",
+ "init_parameters": {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "labels": ["positive", "negative"],
+ "token": {"env_vars": ["HF_API_TOKEN", "HF_TOKEN"], "strict": False, "type": "env_var"},
+ "huggingface_pipeline_kwargs": {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "device": ComponentDevice.resolve_device(None).to_hf(),
+ "task": "zero-shot-classification",
+ },
+ },
+ }
+ component = TransformersZeroShotDocumentClassifier.from_dict(data)
+ assert component.labels == ["positive", "negative"]
+ assert component.pipeline is None
+ assert component.token == Secret.from_dict(
+ {"env_vars": ["HF_API_TOKEN", "HF_TOKEN"], "strict": False, "type": "env_var"}
+ )
+ assert component.huggingface_pipeline_kwargs == {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "device": ComponentDevice.resolve_device(None).to_hf(),
+ "task": "zero-shot-classification",
+ "token": None,
+ }
+
+ def test_from_dict_no_default_parameters(self, monkeypatch):
+ monkeypatch.delenv("HF_API_TOKEN", raising=False)
+ data = {
+ "type": "haystack.components.classifiers.zero_shot_document_classifier.TransformersZeroShotDocumentClassifier",
+ "init_parameters": {"model": "cross-encoder/nli-deberta-v3-xsmall", "labels": ["positive", "negative"]},
+ }
+ component = TransformersZeroShotDocumentClassifier.from_dict(data)
+ assert component.labels == ["positive", "negative"]
+ assert component.pipeline is None
+ assert component.token == Secret.from_dict(
+ {"env_vars": ["HF_API_TOKEN", "HF_TOKEN"], "strict": False, "type": "env_var"}
+ )
+ assert component.huggingface_pipeline_kwargs == {
+ "model": "cross-encoder/nli-deberta-v3-xsmall",
+ "device": ComponentDevice.resolve_device(None).to_hf(),
+ "task": "zero-shot-classification",
+ "token": None,
+ }
+
+ @patch("haystack.components.classifiers.zero_shot_document_classifier.pipeline")
+ def test_warm_up(self, hf_pipeline_mock):
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ component.warm_up()
+ assert component.pipeline is not None
+
+ def test_run_fails_without_warm_up(self):
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ positive_documents = [Document(content="That's good. I like it.")]
+ with pytest.raises(RuntimeError):
+ component.run(documents=positive_documents)
+
+ @patch("haystack.components.classifiers.zero_shot_document_classifier.pipeline")
+ def test_run_fails_with_non_document_input(self, hf_pipeline_mock):
+ hf_pipeline_mock.return_value = " "
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ component.warm_up()
+ text_list = ["That's good. I like it.", "That's bad. I don't like it."]
+ with pytest.raises(TypeError):
+ component.run(documents=text_list)
+
+ @patch("haystack.components.classifiers.zero_shot_document_classifier.pipeline")
+ def test_run_unit(self, hf_pipeline_mock):
+ hf_pipeline_mock.return_value = [
+ {"sequence": "That's good. I like it.", "labels": ["positive", "negative"], "scores": [0.99, 0.01]},
+ {"sequence": "That's bad. I don't like it.", "labels": ["negative", "positive"], "scores": [0.99, 0.01]},
+ ]
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ component.pipeline = hf_pipeline_mock
+ positive_document = Document(content="That's good. I like it.")
+ negative_document = Document(content="That's bad. I don't like it.")
+ result = component.run(documents=[positive_document, negative_document])
+ assert component.pipeline is not None
+ assert result["documents"][0].to_dict()["classification"]["label"] == "positive"
+ assert result["documents"][1].to_dict()["classification"]["label"] == "negative"
+
+ @pytest.mark.integration
+ def test_run(self):
+ component = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+ component.warm_up()
+ positive_document = Document(content="That's good. I like it. " * 1000)
+ negative_document = Document(content="That's bad. I don't like it.")
+ result = component.run(documents=[positive_document, negative_document])
+ assert component.pipeline is not None
+ assert result["documents"][0].to_dict()["classification"]["label"] == "positive"
+ assert result["documents"][1].to_dict()["classification"]["label"] == "negative"
+
+ def test_serialization_and_deserialization_pipeline(self):
+ pipeline = Pipeline()
+ document_store = InMemoryDocumentStore()
+ retriever = InMemoryBM25Retriever(document_store=document_store)
+ document_classifier = TransformersZeroShotDocumentClassifier(
+ model="cross-encoder/nli-deberta-v3-xsmall", labels=["positive", "negative"]
+ )
+
+ pipeline.add_component(instance=retriever, name="retriever")
+ pipeline.add_component(instance=document_classifier, name="document_classifier")
+ pipeline.connect("retriever", "document_classifier")
+ pipeline_dump = pipeline.dumps()
+
+ new_pipeline = Pipeline.loads(pipeline_dump)
+
+ assert new_pipeline == pipeline
diff --git a/testbed/deepset-ai__haystack/test/components/converters/test_csv_to_document.py b/testbed/deepset-ai__haystack/test/components/converters/test_csv_to_document.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8f0bac0f1e7aa75dc21c39c70e2e28a7db9be66
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/converters/test_csv_to_document.py
@@ -0,0 +1,86 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+import logging
+from unittest.mock import patch
+import pandas as pd
+from pathlib import Path
+
+import pytest
+
+from haystack.dataclasses import ByteStream
+from haystack.components.converters.csv import CSVToDocument
+
+
+@pytest.fixture
+def csv_converter():
+ return CSVToDocument()
+
+
+class TestCSVToDocument:
+ def test_init(self, csv_converter):
+ assert isinstance(csv_converter, CSVToDocument)
+
+ def test_run(self, test_files_path):
+ """
+ Test if the component runs correctly.
+ """
+ bytestream = ByteStream.from_file_path(test_files_path / "csv" / "sample_1.csv")
+ bytestream.meta["file_path"] = str(test_files_path / "csv" / "sample_1.csv")
+ bytestream.meta["key"] = "value"
+ files = [bytestream, test_files_path / "csv" / "sample_2.csv", test_files_path / "csv" / "sample_3.csv"]
+ converter = CSVToDocument()
+ output = converter.run(sources=files)
+ docs = output["documents"]
+ assert len(docs) == 3
+ assert "Name,Age\r\nJohn Doe,27\r\nJane Smith,37\r\nMike Johnson,47\r\n" == docs[0].content
+ assert isinstance(docs[0].content, str)
+ assert docs[0].meta == bytestream.meta
+ assert docs[1].meta["file_path"] == str(files[1])
+ assert docs[2].meta["file_path"] == str(files[2])
+
+ def test_run_error_handling(self, test_files_path, caplog):
+ """
+ Test if the component correctly handles errors.
+ """
+ paths = [
+ test_files_path / "csv" / "sample_2.csv",
+ "non_existing_file.csv",
+ test_files_path / "csv" / "sample_3.csv",
+ ]
+ converter = CSVToDocument()
+ with caplog.at_level(logging.WARNING):
+ output = converter.run(sources=paths)
+ assert "non_existing_file.csv" in caplog.text
+ docs = output["documents"]
+ assert len(docs) == 2
+ assert docs[0].meta["file_path"] == str(paths[0])
+
+ def test_encoding_override(self, test_files_path, caplog):
+ """
+ Test if the encoding metadata field is used properly
+ """
+ bytestream = ByteStream.from_file_path(test_files_path / "csv" / "sample_1.csv")
+ bytestream.meta["key"] = "value"
+
+ converter = CSVToDocument(encoding="utf-16-le")
+ output = converter.run(sources=[bytestream])
+ with caplog.at_level(logging.ERROR):
+ output = converter.run(sources=[bytestream])
+ assert "codec can't decode" in caplog.text
+
+ converter = CSVToDocument(encoding="utf-8")
+ output = converter.run(sources=[bytestream])
+ assert "Name,Age\r\n" in output["documents"][0].content
+
+ def test_run_with_meta(self):
+ bytestream = ByteStream(
+ data=b"Name,Age,City\r\nAlice,30,New York\r\nBob,25,Los Angeles\r\nCharlie,35,Chicago\r\n",
+ meta={"name": "test_name", "language": "en"},
+ )
+ converter = CSVToDocument()
+ output = converter.run(sources=[bytestream], meta=[{"language": "it"}])
+ document = output["documents"][0]
+
+ # check that the metadata from the bytestream is merged with that from the meta parameter
+ assert document.meta == {"name": "test_name", "language": "it"}
diff --git a/testbed/deepset-ai__haystack/test/components/evaluators/test_context_relevance_evaluator.py b/testbed/deepset-ai__haystack/test/components/evaluators/test_context_relevance_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd1e33cc8e7e5cb7329da80eb5ad1f064c09862d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/evaluators/test_context_relevance_evaluator.py
@@ -0,0 +1,242 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+import os
+from typing import List
+
+import math
+
+import pytest
+
+from haystack import Pipeline
+from haystack.components.evaluators import ContextRelevanceEvaluator
+from haystack.utils.auth import Secret
+
+
+class TestContextRelevanceEvaluator:
+ def test_init_default(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.instructions == (
+ "Please extract only sentences from the provided context which are absolutely relevant and "
+ "required to answer the following question. If no relevant sentences are found, or if you "
+ "believe the question cannot be answered from the given context, return an empty list, example: []"
+ )
+ assert component.inputs == [("questions", List[str]), ("contexts", List[List[str]])]
+ assert component.outputs == ["relevant_statements"]
+ assert component.examples == [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany?",
+ "contexts": ["Berlin is the capital of Germany. Berlin and was founded in 1244."],
+ },
+ "outputs": {"relevant_statements": ["Berlin is the capital of Germany."]},
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": [
+ "Berlin is the capital of Germany and was founded in 1244.",
+ "Europe is a continent with 44 countries.",
+ "Madrid is the capital of Spain.",
+ ],
+ },
+ "outputs": {"relevant_statements": []},
+ },
+ {
+ "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
+ "outputs": {"relevant_statements": ["Rome is the capital of Italy."]},
+ },
+ ]
+
+ def test_init_fail_wo_openai_api_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ ContextRelevanceEvaluator()
+
+ def test_init_with_parameters(self):
+ component = ContextRelevanceEvaluator(
+ api_key=Secret.from_token("test-api-key"),
+ api="openai",
+ examples=[
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ],
+ )
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.api == "openai"
+ assert component.examples == [
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ]
+
+ def test_to_dict_with_parameters(self, monkeypatch):
+ monkeypatch.setenv("ENV_VAR", "test-api-key")
+ component = ContextRelevanceEvaluator(
+ api="openai",
+ api_key=Secret.from_env_var("ENV_VAR"),
+ examples=[{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}],
+ raise_on_failure=False,
+ progress_bar=False,
+ )
+ data = component.to_dict()
+ assert data == {
+ "type": "haystack.components.evaluators.context_relevance.ContextRelevanceEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["ENV_VAR"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "api_params": {"generation_kwargs": {"response_format": {"type": "json_object"}, "seed": 42}},
+ "examples": [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}],
+ "progress_bar": False,
+ "raise_on_failure": False,
+ },
+ }
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
+ data = {
+ "type": "haystack.components.evaluators.context_relevance.ContextRelevanceEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "examples": [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}],
+ },
+ }
+ component = ContextRelevanceEvaluator.from_dict(data)
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.examples == [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}]
+
+ pipeline = Pipeline()
+ pipeline.add_component("evaluator", component)
+ assert pipeline.loads(pipeline.dumps())
+
+ def test_run_calculates_mean_score(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"relevant_statements": ["a", "b"], "score": 1}']}
+ else:
+ return {"replies": ['{"relevant_statements": [], "score": 0}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media "
+ "presence, number of participants, and economic impact. Football is undoubtedly the world's most "
+ "popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and "
+ "Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python is design philosophy emphasizes code readability, and its language constructs aim to help "
+ "programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ results = component.run(questions=questions, contexts=contexts)
+
+ print(results)
+
+ assert results == {
+ "results": [{"score": 1, "relevant_statements": ["a", "b"]}, {"score": 0, "relevant_statements": []}],
+ "score": 0.5,
+ "meta": None,
+ "individual_scores": [1, 0],
+ }
+
+ def test_run_no_statements_extracted(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"relevant_statements": ["a", "b"], "score": 1}']}
+ else:
+ return {"replies": ['{"relevant_statements": [], "score": 0}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media "
+ "presence, number of participants, and economic impact. Football is undoubtedly the world's most "
+ "popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and "
+ "Messi, drawing a followership of more than 4 billion people."
+ ],
+ [],
+ ]
+ results = component.run(questions=questions, contexts=contexts)
+ assert results == {
+ "results": [{"score": 1, "relevant_statements": ["a", "b"]}, {"score": 0, "relevant_statements": []}],
+ "score": 0.5,
+ "meta": None,
+ "individual_scores": [1, 0],
+ }
+
+ def test_run_missing_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ with pytest.raises(ValueError, match="LLM evaluator expected input parameter"):
+ component.run()
+
+ def test_run_returns_nan_raise_on_failure_false(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator(raise_on_failure=False)
+
+ def generator_run(self, *args, **kwargs):
+ if "Python" in kwargs["prompt"]:
+ raise Exception("OpenAI API request failed.")
+ else:
+ return {"replies": ['{"relevant_statements": ["c", "d"], "score": 1}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media "
+ "presence, number of participants, and economic impact. Football is undoubtedly the world's most "
+ "popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and "
+ "Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming "
+ "language. Its design philosophy emphasizes code readability, and its language constructs aim to help "
+ "programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ results = component.run(questions=questions, contexts=contexts)
+
+ assert math.isnan(results["score"])
+ assert results["results"][0] == {"relevant_statements": ["c", "d"], "score": 1}
+ assert results["results"][1]["relevant_statements"] == []
+ assert math.isnan(results["results"][1]["score"])
+
+ @pytest.mark.skipif(
+ not os.environ.get("OPENAI_API_KEY", None),
+ reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
+ )
+ @pytest.mark.integration
+ def test_live_run(self):
+ questions = ["Who created the Python language?"]
+ contexts = [["Python, created by Guido van Rossum, is a high-level general-purpose programming language."]]
+
+ evaluator = ContextRelevanceEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts)
+
+ required_fields = {"results"}
+ assert all(field in result for field in required_fields)
+ nested_required_fields = {"score", "relevant_statements"}
+ assert all(field in result["results"][0] for field in nested_required_fields)
+
+ assert "meta" in result
+ assert "prompt_tokens" in result["meta"][0]["usage"]
+ assert "completion_tokens" in result["meta"][0]["usage"]
+ assert "total_tokens" in result["meta"][0]["usage"]
diff --git a/testbed/deepset-ai__haystack/test/components/evaluators/test_document_mrr.py b/testbed/deepset-ai__haystack/test/components/evaluators/test_document_mrr.py
new file mode 100644
index 0000000000000000000000000000000000000000..88700eed087ac001f69d44238d6ddfca15e24979
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/components/evaluators/test_document_mrr.py
@@ -0,0 +1,85 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+import pytest
+
+from haystack import Document
+from haystack.components.evaluators.document_mrr import DocumentMRREvaluator
+
+
+def test_run_with_all_matching():
+ evaluator = DocumentMRREvaluator()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 1.0], "score": 1.0}
+
+
+def test_run_with_no_matching():
+ evaluator = DocumentMRREvaluator()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [0.0, 0.0], "score": 0.0}
+
+
+def test_run_with_partial_matching():
+ evaluator = DocumentMRREvaluator()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 0.0], "score": 0.5}
+
+
+def test_run_with_complex_data():
+ evaluator = DocumentMRREvaluator()
+ result = evaluator.run(
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ [Document(content="classical music"), Document(content="classical")],
+ [Document(content="11th century"), Document(content="the 11th")],
+ [Document(content="Denmark, Iceland and Norway")],
+ [Document(content="10th century"), Document(content="10th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="10th century"), Document(content="9th century"), Document(content="9th")],
+ [Document(content="rock music"), Document(content="dubstep"), Document(content="classical")],
+ [Document(content="11th"), Document(content="the 11th"), Document(content="11th century")],
+ [Document(content="Denmark"), Document(content="Norway"), Document(content="Iceland")],
+ [
+ Document(content="10th century"),
+ Document(content="the first half of the 10th century"),
+ Document(content="10th"),
+ Document(content="10th"),
+ ],
+ ],
+ )
+
+ assert result == {
+ "individual_scores": [1.0, 0.5, 0.3333333333333333, 0.5, 0.0, 1.0],
+ "score": pytest.approx(0.555555555555555),
+ }
+
+
+def test_run_with_different_lengths():
+ with pytest.raises(ValueError):
+ evaluator = DocumentMRREvaluator()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator = DocumentMRREvaluator()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")]],
+ )
diff --git a/testbed/deepset-ai__haystack/test/conftest.py b/testbed/deepset-ai__haystack/test/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7282be645e0a750cf0a52209693cdba72ac994e
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/conftest.py
@@ -0,0 +1,90 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+from datetime import datetime
+from pathlib import Path
+from test.tracing.utils import SpyingTracer
+from typing import Generator
+from unittest.mock import Mock, patch
+
+import pytest
+from openai.types.chat import ChatCompletion, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
+
+from haystack import tracing
+from haystack.testing.test_utils import set_all_seeds
+
+set_all_seeds(0)
+
+# Tracing is disabled by default to avoid failures in CI
+tracing.disable_tracing()
+
+
+@pytest.fixture()
+def mock_tokenizer():
+ """
+ Tokenizes the string by splitting on spaces.
+ """
+ tokenizer = Mock()
+ tokenizer.encode = lambda text: text.split()
+ tokenizer.decode = lambda tokens: " ".join(tokens)
+ return tokenizer
+
+
+@pytest.fixture()
+def test_files_path():
+ return Path(__file__).parent / "test_files"
+
+
+@pytest.fixture
+def mock_chat_completion():
+ """
+ Mock the OpenAI API completion response and reuse it for tests
+ """
+ with patch("openai.resources.chat.completions.Completions.create") as mock_chat_completion_create:
+ completion = ChatCompletion(
+ id="foo",
+ model="gpt-4",
+ object="chat.completion",
+ choices=[
+ Choice(
+ finish_reason="stop",
+ logprobs=None,
+ index=0,
+ message=ChatCompletionMessage(content="Hello world!", role="assistant"),
+ )
+ ],
+ created=int(datetime.now().timestamp()),
+ usage={"prompt_tokens": 57, "completion_tokens": 40, "total_tokens": 97},
+ )
+
+ mock_chat_completion_create.return_value = completion
+ yield mock_chat_completion_create
+
+
+@pytest.fixture(autouse=True)
+def request_blocker(request: pytest.FixtureRequest, monkeypatch):
+ """
+ This fixture is applied automatically to all tests.
+ Those that are not marked as integration will have the requests module
+ monkeypatched to avoid making HTTP requests by mistake.
+ """
+ marker = request.node.get_closest_marker("integration")
+ if marker is not None:
+ return
+
+ def urlopen_mock(self, method, url, *args, **kwargs):
+ raise RuntimeError(f"The test was about to {method} {self.scheme}://{self.host}{url}")
+
+ monkeypatch.setattr("urllib3.connectionpool.HTTPConnectionPool.urlopen", urlopen_mock)
+
+
+@pytest.fixture()
+def spying_tracer() -> Generator[SpyingTracer, None, None]:
+ tracer = SpyingTracer()
+ tracing.enable_tracing(tracer)
+
+ yield tracer
+
+ # Make sure to disable tracing after the test to avoid affecting other tests
+ tracing.disable_tracing()
diff --git a/testbed/deepset-ai__haystack/test/core/__init__.py b/testbed/deepset-ai__haystack/test/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1764a6e039233b694403c434fa97c13e847f6ba
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/testbed/deepset-ai__haystack/test/core/component/test_component.py b/testbed/deepset-ai__haystack/test/core/component/test_component.py
new file mode 100644
index 0000000000000000000000000000000000000000..49d3a8bd5309813c827036cce78b308fc38a00d8
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/component/test_component.py
@@ -0,0 +1,547 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+import logging
+from functools import partial
+from typing import Any
+
+import pytest
+
+from haystack.core.component import Component, InputSocket, OutputSocket, component
+from haystack.core.component.component import _hook_component_init
+from haystack.core.component.types import Variadic
+from haystack.core.errors import ComponentError
+from haystack.core.pipeline import Pipeline
+
+
+def test_correct_declaration():
+ @component
+ class MockComponent:
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ # Verifies also instantiation works with no issues
+ assert MockComponent()
+ assert component.registry["test_component.MockComponent"] == MockComponent
+ assert isinstance(MockComponent(), Component)
+ assert MockComponent().__haystack_supports_async__ is False
+
+
+def test_correct_declaration_with_async():
+ @component
+ class MockComponent:
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ @component.output_types(output_value=int)
+ async def run_async(self, input_value: int):
+ return {"output_value": input_value}
+
+ # Verifies also instantiation works with no issues
+ assert MockComponent()
+ assert component.registry["test_component.MockComponent"] == MockComponent
+ assert isinstance(MockComponent(), Component)
+ assert MockComponent().__haystack_supports_async__ is True
+
+
+def test_correct_declaration_with_additional_readonly_property():
+ @component
+ class MockComponent:
+ @property
+ def store(self):
+ return "test_store"
+
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ # Verifies that instantiation works with no issues
+ assert MockComponent()
+ assert component.registry["test_component.MockComponent"] == MockComponent
+ assert MockComponent().store == "test_store"
+
+
+def test_correct_declaration_with_additional_writable_property():
+ @component
+ class MockComponent:
+ @property
+ def store(self):
+ return "test_store"
+
+ @store.setter
+ def store(self, value):
+ self._store = value
+
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ # Verifies that instantiation works with no issues
+ assert component.registry["test_component.MockComponent"] == MockComponent
+ comp = MockComponent()
+ comp.store = "test_store"
+ assert comp.store == "test_store"
+
+
+def test_missing_run():
+ with pytest.raises(ComponentError, match=r"must have a 'run\(\)' method"):
+
+ @component
+ class MockComponent:
+ def another_method(self, input_value: int):
+ return {"output_value": input_value}
+
+
+def test_async_run_not_async():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ def run_async(self, value: int):
+ return {"value": 1}
+
+ with pytest.raises(ComponentError, match=r"must be a coroutine"):
+ comp = MockComponent()
+
+
+def test_async_run_not_coroutine():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ async def run_async(self, value: int):
+ yield {"value": 1}
+
+ with pytest.raises(ComponentError, match=r"must be a coroutine"):
+ comp = MockComponent()
+
+
+def test_parameters_mismatch_run_and_async_run():
+ err_msg = r"Parameters of 'run' and 'run_async' methods must be the same"
+
+ @component
+ class MockComponentMismatchingInputTypes:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ async def run_async(self, value: str):
+ return {"value": "1"}
+
+ with pytest.raises(ComponentError, match=err_msg):
+ comp = MockComponentMismatchingInputTypes()
+
+ @component
+ class MockComponentMismatchingInputs:
+ @component.output_types(value=int)
+ def run(self, value: int, **kwargs):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ async def run_async(self, value: int):
+ return {"value": "1"}
+
+ with pytest.raises(ComponentError, match=err_msg):
+ comp = MockComponentMismatchingInputs()
+
+ @component
+ class MockComponentMismatchingInputOrder:
+ @component.output_types(value=int)
+ def run(self, value: int, another: str):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ async def run_async(self, another: str, value: int):
+ return {"value": "1"}
+
+ with pytest.raises(ComponentError, match=err_msg):
+ comp = MockComponentMismatchingInputOrder()
+
+
+def test_set_input_types():
+ @component
+ class MockComponent:
+ def __init__(self, flag: bool):
+ component.set_input_types(self, value=Any)
+ if flag:
+ component.set_input_type(self, name="another", type=str)
+
+ @component.output_types(value=int)
+ def run(self, **kwargs):
+ return {"value": 1}
+
+ comp = MockComponent(False)
+ assert comp.__haystack_input__._sockets_dict == {"value": InputSocket("value", Any)}
+ assert comp.run() == {"value": 1}
+
+ comp = MockComponent(True)
+ assert comp.__haystack_input__._sockets_dict == {
+ "value": InputSocket("value", Any),
+ "another": InputSocket("another", str),
+ }
+ assert comp.run() == {"value": 1}
+
+
+def test_set_input_types_no_kwarg():
+ @component
+ class MockComponent:
+ def __init__(self, flag: bool):
+ if flag:
+ component.set_input_type(self, name="another", type=str)
+ else:
+ component.set_input_types(self, value=Any)
+
+ @component.output_types(value=int)
+ def run(self, fini: bool):
+ return {"value": 1}
+
+ with pytest.raises(ComponentError, match=r"doesn't have a kwargs parameter"):
+ comp = MockComponent(False)
+
+ with pytest.raises(ComponentError, match=r"doesn't have a kwargs parameter"):
+ comp = MockComponent(True)
+
+
+def test_set_input_types_overrides_run():
+ @component
+ class MockComponent:
+ def __init__(self, state: bool):
+ if state:
+ component.set_input_type(self, name="fini", type=str)
+ else:
+ component.set_input_types(self, fini=Any)
+
+ @component.output_types(value=int)
+ def run(self, fini: bool, **kwargs):
+ return {"value": 1}
+
+ err_msg = "cannot override the parameters of the 'run' method"
+ with pytest.raises(ComponentError, match=err_msg):
+ comp = MockComponent(False)
+
+ with pytest.raises(ComponentError, match=err_msg):
+ comp = MockComponent(True)
+
+
+def test_set_output_types():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ def run(self, value: int):
+ return {"value": 1}
+
+ comp = MockComponent()
+ assert comp.__haystack_output__._sockets_dict == {"value": OutputSocket("value", int)}
+
+
+def test_output_types_decorator_with_compatible_type():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ comp = MockComponent()
+ assert comp.__haystack_output__._sockets_dict == {"value": OutputSocket("value", int)}
+
+
+def test_output_types_decorator_wrong_method():
+ with pytest.raises(ComponentError):
+
+ @component
+ class MockComponent:
+ def run(self, value: int):
+ return {"value": 1}
+
+ @component.output_types(value=int)
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+
+def test_output_types_decorator_and_set_output_types():
+ @component
+ class MockComponent:
+ def __init__(self) -> None:
+ component.set_output_types(self, value=int)
+
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ with pytest.raises(ComponentError, match="Cannot call `set_output_types`"):
+ comp = MockComponent()
+
+
+def test_output_types_decorator_mismatch_run_async_run():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ @component.output_types(value=str)
+ async def run_async(self, value: int):
+ return {"value": "1"}
+
+ with pytest.raises(ComponentError, match=r"Output type specifications .* must be the same"):
+ comp = MockComponent()
+
+
+def test_output_types_decorator_missing_async_run():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ async def run_async(self, value: int):
+ return {"value": "1"}
+
+ with pytest.raises(ComponentError, match=r"Output type specifications .* must be the same"):
+ comp = MockComponent()
+
+
+def test_component_decorator_set_it_as_component():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int):
+ return {"value": 1}
+
+ def to_dict(self):
+ return {}
+
+ @classmethod
+ def from_dict(cls, data):
+ return cls()
+
+ comp = MockComponent()
+ assert isinstance(comp, Component)
+
+
+def test_input_has_default_value():
+ @component
+ class MockComponent:
+ @component.output_types(value=int)
+ def run(self, value: int = 42):
+ return {"value": value}
+
+ comp = MockComponent()
+ assert comp.__haystack_input__._sockets_dict["value"].default_value == 42
+ assert not comp.__haystack_input__._sockets_dict["value"].is_mandatory
+
+
+def test_keyword_only_args():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, *, arg: int):
+ return {"value": arg}
+
+ comp = MockComponent()
+ component_inputs = {name: {"type": socket.type} for name, socket in comp.__haystack_input__._sockets_dict.items()}
+ assert component_inputs == {"arg": {"type": int}}
+
+
+def test_repr():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ comp = MockComponent()
+ assert repr(comp) == f"{object.__repr__(comp)}\nInputs:\n - value: int\nOutputs:\n - value: int"
+
+
+def test_repr_added_to_pipeline():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ pipe = Pipeline()
+ comp = MockComponent()
+ pipe.add_component("my_component", comp)
+ assert repr(comp) == f"{object.__repr__(comp)}\nmy_component\nInputs:\n - value: int\nOutputs:\n - value: int"
+
+
+def test_pre_init_hooking():
+ @component
+ class MockComponent:
+ def __init__(self, pos_arg1, pos_arg2, pos_arg3=None, *, kwarg1=1, kwarg2="string"):
+ self.pos_arg1 = pos_arg1
+ self.pos_arg2 = pos_arg2
+ self.pos_arg3 = pos_arg3
+ self.kwarg1 = kwarg1
+ self.kwarg2 = kwarg2
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ def pre_init_hook(component_class, init_params, expected_params):
+ assert component_class == MockComponent
+ assert init_params == expected_params
+
+ def pre_init_hook_modify(component_class, init_params, expected_params):
+ assert component_class == MockComponent
+ assert init_params == expected_params
+
+ init_params["pos_arg1"] = 2
+ init_params["pos_arg2"] = 0
+ init_params["pos_arg3"] = "modified"
+ init_params["kwarg2"] = "modified string"
+
+ with _hook_component_init(partial(pre_init_hook, expected_params={"pos_arg1": 1, "pos_arg2": 2, "kwarg1": None})):
+ _ = MockComponent(1, 2, kwarg1=None)
+
+ with _hook_component_init(partial(pre_init_hook, expected_params={"pos_arg1": 1, "pos_arg2": 2, "pos_arg3": 0.01})):
+ _ = MockComponent(pos_arg1=1, pos_arg2=2, pos_arg3=0.01)
+
+ with _hook_component_init(
+ partial(pre_init_hook_modify, expected_params={"pos_arg1": 0, "pos_arg2": 1, "pos_arg3": 0.01, "kwarg1": 0})
+ ):
+ c = MockComponent(0, 1, pos_arg3=0.01, kwarg1=0)
+
+ assert c.pos_arg1 == 2
+ assert c.pos_arg2 == 0
+ assert c.pos_arg3 == "modified"
+ assert c.kwarg1 == 0
+ assert c.kwarg2 == "modified string"
+
+
+def test_pre_init_hooking_variadic_positional_args():
+ @component
+ class MockComponent:
+ def __init__(self, *args, kwarg1=1, kwarg2="string"):
+ self.args = args
+ self.kwarg1 = kwarg1
+ self.kwarg2 = kwarg2
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ def pre_init_hook(component_class, init_params, expected_params):
+ assert component_class == MockComponent
+ assert init_params == expected_params
+
+ c = MockComponent(1, 2, 3, kwarg1=None)
+ assert c.args == (1, 2, 3)
+ assert c.kwarg1 is None
+ assert c.kwarg2 == "string"
+
+ with pytest.raises(ComponentError), _hook_component_init(
+ partial(pre_init_hook, expected_params={"args": (1, 2), "kwarg1": None})
+ ):
+ _ = MockComponent(1, 2, kwarg1=None)
+
+
+def test_pre_init_hooking_variadic_kwargs():
+ @component
+ class MockComponent:
+ def __init__(self, pos_arg1, pos_arg2=None, **kwargs):
+ self.pos_arg1 = pos_arg1
+ self.pos_arg2 = pos_arg2
+ self.kwargs = kwargs
+
+ @component.output_types(output_value=int)
+ def run(self, input_value: int):
+ return {"output_value": input_value}
+
+ def pre_init_hook(component_class, init_params, expected_params):
+ assert component_class == MockComponent
+ assert init_params == expected_params
+
+ with _hook_component_init(
+ partial(pre_init_hook, expected_params={"pos_arg1": 1, "kwarg1": None, "kwarg2": 10, "kwarg3": "string"})
+ ):
+ c = MockComponent(1, kwarg1=None, kwarg2=10, kwarg3="string")
+ assert c.pos_arg1 == 1
+ assert c.pos_arg2 is None
+ assert c.kwargs == {"kwarg1": None, "kwarg2": 10, "kwarg3": "string"}
+
+ def pre_init_hook_modify(component_class, init_params, expected_params):
+ assert component_class == MockComponent
+ assert init_params == expected_params
+
+ init_params["pos_arg1"] = 2
+ init_params["pos_arg2"] = 0
+ init_params["some_kwarg"] = "modified string"
+
+ with _hook_component_init(
+ partial(
+ pre_init_hook_modify,
+ expected_params={"pos_arg1": 0, "pos_arg2": 1, "kwarg1": 999, "some_kwarg": "some_value"},
+ )
+ ):
+ c = MockComponent(0, 1, kwarg1=999, some_kwarg="some_value")
+
+ assert c.pos_arg1 == 2
+ assert c.pos_arg2 == 0
+ assert c.kwargs == {"kwarg1": 999, "some_kwarg": "modified string"}
diff --git a/testbed/deepset-ai__haystack/test/core/pipeline/__init__.py b/testbed/deepset-ai__haystack/test/core/pipeline/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1764a6e039233b694403c434fa97c13e847f6ba
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/pipeline/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/testbed/deepset-ai__haystack/test/core/pipeline/features/conftest.py b/testbed/deepset-ai__haystack/test/core/pipeline/features/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..16c7be35ea9d707fc5247a44cbf56e707c342302
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/pipeline/features/conftest.py
@@ -0,0 +1,94 @@
+from dataclasses import dataclass, field
+from typing import Tuple, List, Dict, Any, Set, Union
+from pathlib import Path
+import re
+
+from pytest_bdd import when, then, parsers
+
+from haystack import Pipeline
+
+PIPELINE_NAME_REGEX = re.compile(r"\[(.*)\]")
+
+
+@dataclass
+class PipelineRunData:
+ """
+ Holds the inputs and expected outputs for a single Pipeline run.
+ """
+
+ inputs: Dict[str, Any]
+ include_outputs_from: Set[str] = field(default_factory=set)
+ expected_outputs: Dict[str, Any] = field(default_factory=dict)
+ expected_run_order: List[str] = field(default_factory=list)
+
+
+@dataclass
+class _PipelineResult:
+ """
+ Holds the outputs and the run order of a single Pipeline run.
+ """
+
+ outputs: Dict[str, Any]
+ run_order: List[str]
+
+
+@when("I run the Pipeline", target_fixture="pipeline_result")
+def run_pipeline(
+ pipeline_data: Tuple[Pipeline, List[PipelineRunData]], spying_tracer
+) -> Union[List[Tuple[_PipelineResult, PipelineRunData]], Exception]:
+ """
+ Attempts to run a pipeline with the given inputs.
+ `pipeline_data` is a tuple that must contain:
+ * A Pipeline instance
+ * The data to run the pipeline with
+
+ If successful, returns a list of tuples pairing each run's outputs with its expected run data.
+ In case an exception is raised, returns that exception.
+ """
+ pipeline, pipeline_run_data = pipeline_data[0], pipeline_data[1]
+
+ results: List[_PipelineResult] = []
+
+ for data in pipeline_run_data:
+ try:
+ outputs = pipeline.run(data=data.inputs, include_outputs_from=data.include_outputs_from)
+ run_order = [
+ span.tags["haystack.component.name"]
+ for span in spying_tracer.spans
+ if "haystack.component.name" in span.tags
+ ]
+ results.append(_PipelineResult(outputs=outputs, run_order=run_order))
+ spying_tracer.spans.clear()
+ except Exception as e:
+ return e
+ return [e for e in zip(results, pipeline_run_data)]
+
+
+@then("draw it to file")
+def draw_pipeline(pipeline_data: Tuple[Pipeline, List[PipelineRunData]], request):
+ """
+ Draw the pipeline to a file with the same name as the test.
+ """
+ if m := PIPELINE_NAME_REGEX.search(request.node.name):
+ name = m.group(1).replace(" ", "_")
+ pipeline = pipeline_data[0]
+ graphs_dir = Path(request.config.rootpath) / "test_pipeline_graphs"
+ graphs_dir.mkdir(exist_ok=True)
+ pipeline.draw(graphs_dir / f"{name}.png")
+
+
+@then("it should return the expected result")
+def check_pipeline_result(pipeline_result: List[Tuple[_PipelineResult, PipelineRunData]]):
+ for res, data in pipeline_result:
+ assert res.outputs == data.expected_outputs
+
+
+@then("components ran in the expected order")
+def check_pipeline_run_order(pipeline_result: List[Tuple[_PipelineResult, PipelineRunData]]):
+ for res, data in pipeline_result:
+ assert res.run_order == data.expected_run_order
+
+
+@then(parsers.parse("it must have raised {exception_class_name}"))
+def check_pipeline_raised(pipeline_result: Exception, exception_class_name: str):
+ assert pipeline_result.__class__.__name__ == exception_class_name
diff --git a/testbed/deepset-ai__haystack/test/core/pipeline/features/pipeline_run.feature b/testbed/deepset-ai__haystack/test/core/pipeline/features/pipeline_run.feature
new file mode 100644
index 0000000000000000000000000000000000000000..db064ea2b13b6c50dbd6b61e301f12b7543a5fbc
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/pipeline/features/pipeline_run.feature
@@ -0,0 +1,57 @@
+Feature: Pipeline running
+
+ Scenario Outline: Running a correct Pipeline
+ Given a pipeline
+ When I run the Pipeline
+ Then it should return the expected result
+ And components ran in the expected order
+
+ Examples:
+ | kind |
+ | that has no components |
+ | that is linear |
+ | that is really complex with lots of components, forks, and loops |
+ | that has a single component with a default input |
+ | that has two loops of identical lengths |
+ | that has two loops of different lengths |
+ | that has a single loop with two conditional branches |
+ | that has a component with dynamic inputs defined in init |
+ | that has two branches that don't merge |
+ | that has three branches that don't merge |
+ | that has two branches that merge |
+ | that has different combinations of branches that merge and do not merge |
+ | that has two branches, one of which loops back |
+ | that has a component with mutable input |
+ | that has a component with mutable output sent to multiple inputs |
+ | that has a greedy and variadic component after a component with default input |
+ | that has components added in a different order from the order of execution |
+ | that has a component with only default inputs |
+ | that has a component with only default inputs as first to run and receives inputs from a loop |
+ | that has multiple branches that merge into a component with a single variadic input |
+ | that has multiple branches of different lengths that merge into a component with a single variadic input |
+ | that is linear and returns intermediate outputs |
+ | that has a loop and returns intermediate outputs from it |
+ | that is linear and returns intermediate outputs from multiple sockets |
+ | that has a component with default inputs that doesn't receive anything from its sender |
+ | that has a component with default inputs that doesn't receive anything from its sender but receives input from user |
+ | that has a loop and a component with default inputs that doesn't receive anything from its sender but receives input from user |
+ | that has multiple components with only default inputs and are added in a different order from the order of execution |
+ | that is linear with conditional branching and multiple joins |
+ | that is a simple agent |
+ | that has a variadic component that receives partial inputs |
+ | that has an answer joiner variadic component |
+ | that is linear and a component in the middle receives optional input from other components and input from the user |
+ | that has a loop in the middle |
+ | that has variadic component that receives a conditional input |
+ | that has a string variadic component |
+
+ Scenario Outline: Running a bad Pipeline
+ Given a pipeline
+ When I run the Pipeline
+ Then it must have raised <exception>
+
+ Examples:
+ | kind | exception |
+ | that has an infinite loop | PipelineMaxComponentRuns |
+ | that has a component that doesn't return a dictionary | PipelineRuntimeError |
+ | that has a cycle that would get it stuck | PipelineRuntimeError |
diff --git a/testbed/deepset-ai__haystack/test/core/pipeline/features/test_run.py b/testbed/deepset-ai__haystack/test/core/pipeline/features/test_run.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a82cf4dbde667bdc45720b779e44b7e0c92ce2d
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/pipeline/features/test_run.py
@@ -0,0 +1,2226 @@
+import json
+from typing import List, Optional, Dict, Any
+import re
+
+from pytest_bdd import scenarios, given
+import pytest
+
+from haystack import Pipeline, Document, component
+from haystack.document_stores.types import DuplicatePolicy
+from haystack.dataclasses import ChatMessage, GeneratedAnswer
+from haystack.components.routers import ConditionalRouter
+from haystack.components.builders import PromptBuilder, AnswerBuilder, ChatPromptBuilder
+from haystack.components.preprocessors import DocumentCleaner, DocumentSplitter
+from haystack.components.retrievers.in_memory import InMemoryBM25Retriever
+from haystack.document_stores.in_memory import InMemoryDocumentStore
+from haystack.components.joiners import BranchJoiner, DocumentJoiner, AnswerJoiner, StringJoiner
+from haystack.testing.sample_components import (
+ Accumulate,
+ AddFixedValue,
+ Double,
+ Greet,
+ Parity,
+ Repeat,
+ Subtract,
+ Sum,
+ Threshold,
+ Remainder,
+ FString,
+ Hello,
+ TextSplitter,
+ StringListJoiner,
+)
+from haystack.testing.factory import component_class
+
+from test.core.pipeline.features.conftest import PipelineRunData
+
+pytestmark = pytest.mark.integration
+
+scenarios("pipeline_run.feature")
+
+
+@given("a pipeline that has no components", target_fixture="pipeline_data")
+def pipeline_that_has_no_components():
+    pipeline = Pipeline(max_runs_per_component=1)  # empty pipeline: running it should simply yield no outputs
+    inputs = {}  # nothing to feed in
+    expected_outputs = {}  # and nothing expected back
+    return pipeline, [PipelineRunData(inputs=inputs, expected_outputs=expected_outputs)]
+
+
+@given("a pipeline that is linear", target_fixture="pipeline_data")
+def pipeline_that_is_linear():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("first_addition", AddFixedValue(add=2))
+ pipeline.add_component("second_addition", AddFixedValue())
+ pipeline.add_component("double", Double())
+ pipeline.connect("first_addition", "double")
+ pipeline.connect("double", "second_addition")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"first_addition": {"value": 1}},
+ expected_outputs={"second_addition": {"result": 7}},
+ expected_run_order=["first_addition", "double", "second_addition"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has an infinite loop", target_fixture="pipeline_data")
+def pipeline_that_has_an_infinite_loop():
+    routes = [
+        {"condition": "{{number > 2}}", "output": "{{number}}", "output_name": "big_number", "output_type": int},
+        {"condition": "{{number <= 2}}", "output": "{{number + 2}}", "output_name": "small_number", "output_type": int},
+    ]
+
+    main_input = BranchJoiner(int)
+    first_router = ConditionalRouter(routes=routes)
+    second_router = ConditionalRouter(routes=routes)
+
+    pipe = Pipeline(max_runs_per_component=1)  # with input 3 the cycle below never exits, so the run must abort
+    pipe.add_component("main_input", main_input)
+    pipe.add_component("first_router", first_router)
+    pipe.add_component("second_router", second_router)
+
+    pipe.connect("main_input", "first_router.number")
+    pipe.connect("first_router.big_number", "second_router.number")
+    pipe.connect("second_router.big_number", "main_input")  # closes the loop: 3 > 2 always takes "big_number"
+
+    return pipe, [PipelineRunData({"main_input": {"value": 3}})]  # no expected_outputs: feature table expects PipelineMaxComponentRuns
+
+
+@given("a pipeline that is really complex with lots of components, forks, and loops", target_fixture="pipeline_data")
+def pipeline_complex():
+ pipeline = Pipeline(max_runs_per_component=2)
+ pipeline.add_component("greet_first", Greet(message="Hello, the value is {value}."))
+ pipeline.add_component("accumulate_1", Accumulate())
+ pipeline.add_component("add_two", AddFixedValue(add=2))
+ pipeline.add_component("parity", Parity())
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("accumulate_2", Accumulate())
+
+ pipeline.add_component("branch_joiner", BranchJoiner(type_=int))
+ pipeline.add_component("below_10", Threshold(threshold=10))
+ pipeline.add_component("double", Double())
+
+ pipeline.add_component("greet_again", Greet(message="Hello again, now the value is {value}."))
+ pipeline.add_component("sum", Sum())
+
+ pipeline.add_component("greet_enumerator", Greet(message="Hello from enumerator, here the value became {value}."))
+ pipeline.add_component("enumerate", Repeat(outputs=["first", "second"]))
+ pipeline.add_component("add_three", AddFixedValue(add=3))
+
+ pipeline.add_component("diff", Subtract())
+ pipeline.add_component("greet_one_last_time", Greet(message="Bye bye! The value here is {value}!"))
+ pipeline.add_component("replicate", Repeat(outputs=["first", "second"]))
+ pipeline.add_component("add_five", AddFixedValue(add=5))
+ pipeline.add_component("add_four", AddFixedValue(add=4))
+ pipeline.add_component("accumulate_3", Accumulate())
+
+ pipeline.connect("greet_first", "accumulate_1")
+ pipeline.connect("accumulate_1", "add_two")
+ pipeline.connect("add_two", "parity")
+
+ pipeline.connect("parity.even", "greet_again")
+ pipeline.connect("greet_again", "sum.values")
+ pipeline.connect("sum", "diff.first_value")
+ pipeline.connect("diff", "greet_one_last_time")
+ pipeline.connect("greet_one_last_time", "replicate")
+ pipeline.connect("replicate.first", "add_five.value")
+ pipeline.connect("replicate.second", "add_four.value")
+ pipeline.connect("add_four", "accumulate_3")
+
+ pipeline.connect("parity.odd", "add_one.value")
+ pipeline.connect("add_one", "branch_joiner.value")
+ pipeline.connect("branch_joiner", "below_10")
+
+ pipeline.connect("below_10.below", "double")
+ pipeline.connect("double", "branch_joiner.value")
+
+ pipeline.connect("below_10.above", "accumulate_2")
+ pipeline.connect("accumulate_2", "diff.second_value")
+
+ pipeline.connect("greet_enumerator", "enumerate")
+ pipeline.connect("enumerate.second", "sum.values")
+
+ pipeline.connect("enumerate.first", "add_three.value")
+ pipeline.connect("add_three", "sum.values")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"greet_first": {"value": 1}, "greet_enumerator": {"value": 1}},
+ expected_outputs={"accumulate_3": {"value": -7}, "add_five": {"result": -6}},
+ expected_run_order=[
+ "greet_first",
+ "greet_enumerator",
+ "accumulate_1",
+ "enumerate",
+ "add_two",
+ "add_three",
+ "parity",
+ "add_one",
+ "branch_joiner",
+ "below_10",
+ "double",
+ "branch_joiner",
+ "below_10",
+ "double",
+ "branch_joiner",
+ "below_10",
+ "accumulate_2",
+ "sum",
+ "diff",
+ "greet_one_last_time",
+ "replicate",
+ "add_five",
+ "add_four",
+ "accumulate_3",
+ ],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a single component with a default input", target_fixture="pipeline_data")
+def pipeline_that_has_a_single_component_with_a_default_input():
+ @component
+ class WithDefault:
+ @component.output_types(b=int)
+ def run(self, a: int, b: int = 2):
+ return {"c": a + b}
+
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("with_defaults", WithDefault())
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"with_defaults": {"a": 40, "b": 30}},
+ expected_outputs={"with_defaults": {"c": 70}},
+ expected_run_order=["with_defaults"],
+ ),
+ PipelineRunData(
+ inputs={"with_defaults": {"a": 40}},
+ expected_outputs={"with_defaults": {"c": 42}},
+ expected_run_order=["with_defaults"],
+ ),
+ ],
+ )
+
+
+@given("a pipeline that has two loops of identical lengths", target_fixture="pipeline_data")
+def pipeline_that_has_two_loops_of_identical_lengths():
+ pipeline = Pipeline(max_runs_per_component=10)
+ pipeline.add_component("branch_joiner", BranchJoiner(type_=int))
+ pipeline.add_component("remainder", Remainder(divisor=3))
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("add_two", AddFixedValue(add=2))
+
+ pipeline.connect("branch_joiner.value", "remainder.value")
+ pipeline.connect("remainder.remainder_is_1", "add_two.value")
+ pipeline.connect("remainder.remainder_is_2", "add_one.value")
+ pipeline.connect("add_two", "branch_joiner.value")
+ pipeline.connect("add_one", "branch_joiner.value")
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"branch_joiner": {"value": 0}},
+ expected_outputs={"remainder": {"remainder_is_0": 0}},
+ expected_run_order=["branch_joiner", "remainder"],
+ ),
+ PipelineRunData(
+ inputs={"branch_joiner": {"value": 3}},
+ expected_outputs={"remainder": {"remainder_is_0": 3}},
+ expected_run_order=["branch_joiner", "remainder"],
+ ),
+ PipelineRunData(
+ inputs={"branch_joiner": {"value": 4}},
+ expected_outputs={"remainder": {"remainder_is_0": 6}},
+ expected_run_order=["branch_joiner", "remainder", "add_two", "branch_joiner", "remainder"],
+ ),
+ PipelineRunData(
+ inputs={"branch_joiner": {"value": 5}},
+ expected_outputs={"remainder": {"remainder_is_0": 6}},
+ expected_run_order=["branch_joiner", "remainder", "add_one", "branch_joiner", "remainder"],
+ ),
+ PipelineRunData(
+ inputs={"branch_joiner": {"value": 6}},
+ expected_outputs={"remainder": {"remainder_is_0": 6}},
+ expected_run_order=["branch_joiner", "remainder"],
+ ),
+ ],
+ )
+
+
+@given("a pipeline that has two loops of different lengths", target_fixture="pipeline_data")
+def pipeline_that_has_two_loops_of_different_lengths():
+    pipeline = Pipeline(max_runs_per_component=10)
+    pipeline.add_component("branch_joiner", BranchJoiner(type_=int))
+    pipeline.add_component("remainder", Remainder(divisor=3))
+    pipeline.add_component("add_one", AddFixedValue(add=1))
+    pipeline.add_component("add_two_1", AddFixedValue(add=1))  # NOTE(review): adds 1; the "add_two" branch adds 2 in total over two hops
+    pipeline.add_component("add_two_2", AddFixedValue(add=1))  # second +1 hop of the longer loop
+
+    pipeline.connect("branch_joiner.value", "remainder.value")
+    pipeline.connect("remainder.remainder_is_1", "add_two_1.value")  # remainder 1 -> longer loop (+1, +1)
+    pipeline.connect("add_two_1", "add_two_2.value")
+    pipeline.connect("add_two_2", "branch_joiner")
+    pipeline.connect("remainder.remainder_is_2", "add_one.value")  # remainder 2 -> shorter loop (+1)
+    pipeline.connect("add_one", "branch_joiner")
+
+    return (
+        pipeline,
+        [
+            PipelineRunData(
+                inputs={"branch_joiner": {"value": 0}},
+                expected_outputs={"remainder": {"remainder_is_0": 0}},  # already divisible by 3: no loop taken
+                expected_run_order=["branch_joiner", "remainder"],
+            ),
+            PipelineRunData(
+                inputs={"branch_joiner": {"value": 3}},
+                expected_outputs={"remainder": {"remainder_is_0": 3}},
+                expected_run_order=["branch_joiner", "remainder"],
+            ),
+            PipelineRunData(
+                inputs={"branch_joiner": {"value": 4}},
+                expected_outputs={"remainder": {"remainder_is_0": 6}},  # 4 -> +1 -> +1 -> 6 via the longer loop
+                expected_run_order=[
+                    "branch_joiner",
+                    "remainder",
+                    "add_two_1",
+                    "add_two_2",
+                    "branch_joiner",
+                    "remainder",
+                ],
+            ),
+            PipelineRunData(
+                inputs={"branch_joiner": {"value": 5}},
+                expected_outputs={"remainder": {"remainder_is_0": 6}},  # 5 -> +1 -> 6 via the shorter loop
+                expected_run_order=["branch_joiner", "remainder", "add_one", "branch_joiner", "remainder"],
+            ),
+            PipelineRunData(
+                inputs={"branch_joiner": {"value": 6}},
+                expected_outputs={"remainder": {"remainder_is_0": 6}},
+                expected_run_order=["branch_joiner", "remainder"],
+            ),
+        ],
+    )
+
+
+@given("a pipeline that has a single loop with two conditional branches", target_fixture="pipeline_data")
+def pipeline_that_has_a_single_loop_with_two_conditional_branches():
+ accumulator = Accumulate()
+ pipeline = Pipeline(max_runs_per_component=10)
+
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("branch_joiner", BranchJoiner(type_=int))
+ pipeline.add_component("below_10", Threshold(threshold=10))
+ pipeline.add_component("below_5", Threshold(threshold=5))
+ pipeline.add_component("add_three", AddFixedValue(add=3))
+ pipeline.add_component("accumulator", accumulator)
+ pipeline.add_component("add_two", AddFixedValue(add=2))
+
+ pipeline.connect("add_one.result", "branch_joiner")
+ pipeline.connect("branch_joiner.value", "below_10.value")
+ pipeline.connect("below_10.below", "accumulator.value")
+ pipeline.connect("accumulator.value", "below_5.value")
+ pipeline.connect("below_5.above", "add_three.value")
+ pipeline.connect("below_5.below", "branch_joiner")
+ pipeline.connect("add_three.result", "branch_joiner")
+ pipeline.connect("below_10.above", "add_two.value")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"add_one": {"value": 3}},
+ expected_outputs={"add_two": {"result": 13}},
+ expected_run_order=[
+ "add_one",
+ "branch_joiner",
+ "below_10",
+ "accumulator",
+ "below_5",
+ "branch_joiner",
+ "below_10",
+ "accumulator",
+ "below_5",
+ "add_three",
+ "branch_joiner",
+ "below_10",
+ "add_two",
+ ],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a component with dynamic inputs defined in init", target_fixture="pipeline_data")
+def pipeline_that_has_a_component_with_dynamic_inputs_defined_in_init():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("hello", Hello())
+ pipeline.add_component("fstring", FString(template="This is the greeting: {greeting}!", variables=["greeting"]))
+ pipeline.add_component("splitter", TextSplitter())
+ pipeline.connect("hello.output", "fstring.greeting")
+ pipeline.connect("fstring.string", "splitter.sentence")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"hello": {"word": "Alice"}},
+ expected_outputs={"splitter": {"output": ["This", "is", "the", "greeting:", "Hello,", "Alice!!"]}},
+ expected_run_order=["hello", "fstring", "splitter"],
+ ),
+ PipelineRunData(
+ inputs={"hello": {"word": "Alice"}, "fstring": {"template": "Received: {greeting}"}},
+ expected_outputs={"splitter": {"output": ["Received:", "Hello,", "Alice!"]}},
+ expected_run_order=["hello", "fstring", "splitter"],
+ ),
+ ],
+ )
+
+
+@given("a pipeline that has two branches that don't merge", target_fixture="pipeline_data")
+def pipeline_that_has_two_branches_that_dont_merge():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("parity", Parity())
+ pipeline.add_component("add_ten", AddFixedValue(add=10))
+ pipeline.add_component("double", Double())
+ pipeline.add_component("add_three", AddFixedValue(add=3))
+
+ pipeline.connect("add_one.result", "parity.value")
+ pipeline.connect("parity.even", "add_ten.value")
+ pipeline.connect("parity.odd", "double.value")
+ pipeline.connect("add_ten.result", "add_three.value")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"add_one": {"value": 1}},
+ expected_outputs={"add_three": {"result": 15}},
+ expected_run_order=["add_one", "parity", "add_ten", "add_three"],
+ ),
+ PipelineRunData(
+ inputs={"add_one": {"value": 2}},
+ expected_outputs={"double": {"value": 6}},
+ expected_run_order=["add_one", "parity", "double"],
+ ),
+ ],
+ )
+
+
+@given("a pipeline that has three branches that don't merge", target_fixture="pipeline_data")
+def pipeline_that_has_three_branches_that_dont_merge():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("repeat", Repeat(outputs=["first", "second"]))
+ pipeline.add_component("add_ten", AddFixedValue(add=10))
+ pipeline.add_component("double", Double())
+ pipeline.add_component("add_three", AddFixedValue(add=3))
+ pipeline.add_component("add_one_again", AddFixedValue(add=1))
+
+ pipeline.connect("add_one.result", "repeat.value")
+ pipeline.connect("repeat.first", "add_ten.value")
+ pipeline.connect("repeat.second", "double.value")
+ pipeline.connect("repeat.second", "add_three.value")
+ pipeline.connect("add_three.result", "add_one_again.value")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"add_one": {"value": 1}},
+ expected_outputs={"add_one_again": {"result": 6}, "add_ten": {"result": 12}, "double": {"value": 4}},
+ expected_run_order=["add_one", "repeat", "add_ten", "double", "add_three", "add_one_again"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has two branches that merge", target_fixture="pipeline_data")
+def pipeline_that_has_two_branches_that_merge():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("first_addition", AddFixedValue(add=2))
+ pipeline.add_component("second_addition", AddFixedValue(add=2))
+ pipeline.add_component("third_addition", AddFixedValue(add=2))
+ pipeline.add_component("diff", Subtract())
+ pipeline.add_component("fourth_addition", AddFixedValue(add=1))
+
+ pipeline.connect("first_addition.result", "second_addition.value")
+ pipeline.connect("second_addition.result", "diff.first_value")
+ pipeline.connect("third_addition.result", "diff.second_value")
+ pipeline.connect("diff", "fourth_addition.value")
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"first_addition": {"value": 1}, "third_addition": {"value": 1}},
+ expected_outputs={"fourth_addition": {"result": 3}},
+ expected_run_order=["first_addition", "third_addition", "second_addition", "diff", "fourth_addition"],
+ )
+ ],
+ )
+
+
+@given(
+ "a pipeline that has different combinations of branches that merge and do not merge", target_fixture="pipeline_data"
+)
+def pipeline_that_has_different_combinations_of_branches_that_merge_and_do_not_merge():
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("add_one", AddFixedValue())
+ pipeline.add_component("parity", Parity())
+ pipeline.add_component("add_ten", AddFixedValue(add=10))
+ pipeline.add_component("double", Double())
+ pipeline.add_component("add_four", AddFixedValue(add=4))
+ pipeline.add_component("add_two", AddFixedValue())
+ pipeline.add_component("add_two_as_well", AddFixedValue())
+ pipeline.add_component("diff", Subtract())
+
+ pipeline.connect("add_one.result", "parity.value")
+ pipeline.connect("parity.even", "add_four.value")
+ pipeline.connect("parity.odd", "double.value")
+ pipeline.connect("add_ten.result", "diff.first_value")
+ pipeline.connect("double.value", "diff.second_value")
+ pipeline.connect("parity.odd", "add_ten.value")
+ pipeline.connect("add_four.result", "add_two.value")
+ pipeline.connect("add_four.result", "add_two_as_well.value")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"add_one": {"value": 1}, "add_two": {"add": 2}, "add_two_as_well": {"add": 2}},
+ expected_outputs={"add_two": {"result": 8}, "add_two_as_well": {"result": 8}},
+ expected_run_order=["add_one", "parity", "add_four", "add_two", "add_two_as_well"],
+ ),
+ PipelineRunData(
+ inputs={"add_one": {"value": 2}, "add_two": {"add": 2}, "add_two_as_well": {"add": 2}},
+ expected_outputs={"diff": {"difference": 7}},
+ expected_run_order=["add_one", "parity", "double", "add_ten", "diff"],
+ ),
+ ],
+ )
+
+
+@given("a pipeline that has two branches, one of which loops back", target_fixture="pipeline_data")
+def pipeline_that_has_two_branches_one_of_which_loops_back():
+ pipeline = Pipeline(max_runs_per_component=10)
+ pipeline.add_component("add_zero", AddFixedValue(add=0))
+ pipeline.add_component("branch_joiner", BranchJoiner(type_=int))
+ pipeline.add_component("sum", Sum())
+ pipeline.add_component("below_10", Threshold(threshold=10))
+ pipeline.add_component("add_one", AddFixedValue(add=1))
+ pipeline.add_component("counter", Accumulate())
+ pipeline.add_component("add_two", AddFixedValue(add=2))
+
+ pipeline.connect("add_zero", "branch_joiner.value")
+ pipeline.connect("branch_joiner", "below_10.value")
+ pipeline.connect("below_10.below", "add_one.value")
+ pipeline.connect("add_one.result", "counter.value")
+ pipeline.connect("counter.value", "branch_joiner.value")
+ pipeline.connect("below_10.above", "add_two.value")
+ pipeline.connect("add_two.result", "sum.values")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"add_zero": {"value": 8}, "sum": {"values": 2}},
+ expected_outputs={"sum": {"total": 23}},
+ expected_run_order=[
+ "add_zero",
+ "branch_joiner",
+ "below_10",
+ "add_one",
+ "counter",
+ "branch_joiner",
+ "below_10",
+ "add_one",
+ "counter",
+ "branch_joiner",
+ "below_10",
+ "add_two",
+ "sum",
+ ],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a component with mutable input", target_fixture="pipeline_data")
+def pipeline_that_has_a_component_with_mutable_input():
+ @component
+ class InputMangler:
+ @component.output_types(mangled_list=List[str])
+ def run(self, input_list: List[str]):
+ input_list.append("extra_item")
+ return {"mangled_list": input_list}
+
+ pipe = Pipeline(max_runs_per_component=1)
+ pipe.add_component("mangler1", InputMangler())
+ pipe.add_component("mangler2", InputMangler())
+ pipe.add_component("concat1", StringListJoiner())
+ pipe.add_component("concat2", StringListJoiner())
+ pipe.connect("mangler1", "concat1")
+ pipe.connect("mangler2", "concat2")
+
+ input_list = ["foo", "bar"]
+
+ return (
+ pipe,
+ [
+ PipelineRunData(
+ inputs={"mangler1": {"input_list": input_list}, "mangler2": {"input_list": input_list}},
+ expected_outputs={
+ "concat1": {"output": ["foo", "bar", "extra_item"]},
+ "concat2": {"output": ["foo", "bar", "extra_item"]},
+ },
+ expected_run_order=["mangler1", "mangler2", "concat1", "concat2"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a component with mutable output sent to multiple inputs", target_fixture="pipeline_data")
+def pipeline_that_has_a_component_with_mutable_output_sent_to_multiple_inputs():
+    @component
+    class PassThroughPromptBuilder:
+        # This is a pass-through component that returns the same input
+        @component.output_types(prompt=List[ChatMessage])
+        def run(self, prompt_source: List[ChatMessage]):
+            return {"prompt": prompt_source}
+
+    @component
+    class MessageMerger:
+        @component.output_types(merged_message=str)
+        def run(self, messages: List[ChatMessage], metadata: Optional[dict] = None):  # metadata is accepted but never used
+            return {"merged_message": "\n".join(t.content for t in messages)}
+
+    @component
+    class FakeGenerator:
+        # This component is a fake generator that always returns the same message
+        @component.output_types(replies=List[ChatMessage])
+        def run(self, messages: List[ChatMessage]):
+            return {"replies": [ChatMessage.from_assistant("Fake message")]}
+
+    prompt_builder = PassThroughPromptBuilder()
+    llm = FakeGenerator()
+    mm1 = MessageMerger()
+    mm2 = MessageMerger()
+
+    pipe = Pipeline(max_runs_per_component=1)
+    pipe.add_component("prompt_builder", prompt_builder)
+    pipe.add_component("llm", llm)
+    pipe.add_component("mm1", mm1)
+    pipe.add_component("mm2", mm2)
+
+    pipe.connect("prompt_builder.prompt", "llm.messages")
+    pipe.connect("prompt_builder.prompt", "mm1")  # the same (mutable) list object fans out to both llm and mm1
+    pipe.connect("llm.replies", "mm2")
+
+    messages = [
+        ChatMessage.from_system("Always respond in English even if some input data is in other languages."),
+        ChatMessage.from_user("Tell me about Berlin"),
+    ]
+    params = {"metadata": {"metadata_key": "metadata_value", "meta2": "value2"}}
+
+    return (
+        pipe,
+        [
+            PipelineRunData(
+                inputs={"mm1": params, "mm2": params, "prompt_builder": {"prompt_source": messages}},
+                expected_outputs={
+                    "mm1": {
+                        "merged_message": "Always respond "
+                        "in English even "
+                        "if some input "
+                        "data is in other "
+                        "languages.\n"
+                        "Tell me about "
+                        "Berlin"
+                    },
+                    "mm2": {"merged_message": "Fake message"},
+                },
+                expected_run_order=["prompt_builder", "llm", "mm1", "mm2"],
+            )
+        ],
+    )
+
+
+@given(
+ "a pipeline that has a greedy and variadic component after a component with default input",
+ target_fixture="pipeline_data",
+)
+def pipeline_that_has_a_greedy_and_variadic_component_after_a_component_with_default_input():
+ """
+ This test verifies that `Pipeline.run()` executes the components in the correct order when
+ there's a greedy Component with variadic input right before a Component with at least one default input.
+
+ We use the `spying_tracer` fixture to simplify the code to verify the order of execution.
+ This creates some coupling between this test and how we trace the Pipeline execution.
+ A worthy tradeoff in my opinion, we will notice right away if we change either the run logic or
+ the tracing logic.
+ """
+ document_store = InMemoryDocumentStore()
+ document_store.write_documents([Document(content="This is a simple document")])
+
+ pipeline = Pipeline(max_runs_per_component=1)
+ template = "Given this documents: {{ documents|join(', ', attribute='content') }} Answer this question: {{ query }}"
+ pipeline.add_component("retriever", InMemoryBM25Retriever(document_store=document_store))
+ pipeline.add_component("prompt_builder", PromptBuilder(template=template))
+ pipeline.add_component("branch_joiner", BranchJoiner(List[Document]))
+
+ pipeline.connect("retriever", "branch_joiner")
+ pipeline.connect("branch_joiner", "prompt_builder.documents")
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"query": "This is my question"},
+ expected_outputs={
+ "prompt_builder": {
+ "prompt": "Given this "
+ "documents: "
+ "This is a "
+ "simple "
+ "document "
+ "Answer this "
+ "question: "
+ "This is my "
+ "question"
+ }
+ },
+ expected_run_order=["retriever", "branch_joiner", "prompt_builder"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a component that doesn't return a dictionary", target_fixture="pipeline_data")
+def pipeline_that_has_a_component_that_doesnt_return_a_dictionary():
+ BrokenComponent = component_class(
+ "BrokenComponent",
+ input_types={"a": int},
+ output_types={"b": int},
+ output=1, # type:ignore
+ )
+
+ pipe = Pipeline(max_runs_per_component=10)
+ pipe.add_component("comp", BrokenComponent())
+ return pipe, [PipelineRunData({"comp": {"a": 1}})]
+
+
+@given(
+ "a pipeline that has components added in a different order from the order of execution",
+ target_fixture="pipeline_data",
+)
+def pipeline_that_has_components_added_in_a_different_order_from_the_order_of_execution():
+ """
+ We enqueue the Components in internal `to_run` data structure at the start of `Pipeline.run()` using the order
+ they are added in the Pipeline with `Pipeline.add_component()`.
+ If a Component A with defaults is added before a Component B that has no defaults, but in the Pipeline
+ logic A must be executed after B it could run instead before.
+
+ This test verifies that the order of execution is correct.
+ """
+ docs = [Document(content="Rome is the capital of Italy"), Document(content="Paris is the capital of France")]
+ doc_store = InMemoryDocumentStore()
+ doc_store.write_documents(docs)
+ template = (
+ "Given the following information, answer the question.\n"
+ "Context:\n"
+ "{% for document in documents %}"
+ " {{ document.content }}\n"
+ "{% endfor %}"
+ "Question: {{ query }}"
+ )
+
+ pipe = Pipeline(max_runs_per_component=1)
+
+ # The order of this addition is important for the test
+ # Do not edit them.
+ pipe.add_component("prompt_builder", PromptBuilder(template=template))
+ pipe.add_component("retriever", InMemoryBM25Retriever(document_store=doc_store))
+ pipe.connect("retriever", "prompt_builder.documents")
+
+ query = "What is the capital of France?"
+ return (
+ pipe,
+ [
+ PipelineRunData(
+ inputs={"prompt_builder": {"query": query}, "retriever": {"query": query}},
+ expected_outputs={
+ "prompt_builder": {
+ "prompt": "Given the "
+ "following "
+ "information, "
+ "answer the "
+ "question.\n"
+ "Context:\n"
+ " Paris is "
+ "the capital "
+ "of France\n"
+ " Rome is "
+ "the capital "
+ "of Italy\n"
+ "Question: "
+ "What is the "
+ "capital of "
+ "France?"
+ }
+ },
+ expected_run_order=["retriever", "prompt_builder"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a component with only default inputs", target_fixture="pipeline_data")
+def pipeline_that_has_a_component_with_only_default_inputs():
+ FakeGenerator = component_class(
+ "FakeGenerator", input_types={"prompt": str}, output_types={"replies": List[str]}, output={"replies": ["Paris"]}
+ )
+ docs = [Document(content="Rome is the capital of Italy"), Document(content="Paris is the capital of France")]
+ doc_store = InMemoryDocumentStore()
+ doc_store.write_documents(docs)
+ template = (
+ "Given the following information, answer the question.\n"
+ "Context:\n"
+ "{% for document in documents %}"
+ " {{ document.content }}\n"
+ "{% endfor %}"
+ "Question: {{ query }}"
+ )
+
+ pipe = Pipeline(max_runs_per_component=1)
+
+ pipe.add_component("retriever", InMemoryBM25Retriever(document_store=doc_store))
+ pipe.add_component("prompt_builder", PromptBuilder(template=template))
+ pipe.add_component("generator", FakeGenerator())
+ pipe.add_component("answer_builder", AnswerBuilder())
+
+ pipe.connect("retriever", "prompt_builder.documents")
+ pipe.connect("prompt_builder.prompt", "generator.prompt")
+ pipe.connect("generator.replies", "answer_builder.replies")
+ pipe.connect("retriever.documents", "answer_builder.documents")
+
+ return (
+ pipe,
+ [
+ PipelineRunData(
+ inputs={"query": "What is the capital of France?"},
+ expected_outputs={
+ "answer_builder": {
+ "answers": [
+ GeneratedAnswer(
+ data="Paris",
+ query="What " "is " "the " "capital " "of " "France?",
+ documents=[
+ Document(
+ id="413dccdf51a54cca75b7ed2eddac04e6e58560bd2f0caf4106a3efc023fe3651",
+ content="Paris is the capital of France",
+ score=1.600237583702734,
+ ),
+ Document(
+ id="a4a874fc2ef75015da7924d709fbdd2430e46a8e94add6e0f26cd32c1c03435d",
+ content="Rome is the capital of Italy",
+ score=1.2536639934227616,
+ ),
+ ],
+ meta={},
+ )
+ ]
+ }
+ },
+ expected_run_order=["retriever", "prompt_builder", "generator", "answer_builder"],
+ )
+ ],
+ )
+
+
+@given(
+ "a pipeline that has a component with only default inputs as first to run and receives inputs from a loop",
+ target_fixture="pipeline_data",
+)
+def pipeline_that_has_a_component_with_only_default_inputs_as_first_to_run_and_receives_inputs_from_a_loop():
+ """
+ This tests verifies that a Pipeline doesn't get stuck running in a loop if
+ it has all the following characterics:
+ - The first Component has all defaults for its inputs
+ - The first Component receives one input from the user
+ - The first Component receives one input from a loop in the Pipeline
+ - The second Component has at least one default input
+ """
+
+ def fake_generator_run(self, generation_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
+ # Simple hack to simulate a model returning a different reply after the
+ # the first time it's called
+ if getattr(fake_generator_run, "called", False):
+ return {"replies": ["Rome"]}
+ fake_generator_run.called = True
+ return {"replies": ["Paris"]}
+
+ FakeGenerator = component_class(
+ "FakeGenerator",
+ input_types={"prompt": str},
+ output_types={"replies": List[str]},
+ extra_fields={"run": fake_generator_run},
+ )
+ template = (
+ "Answer the following question.\n"
+ "{% if previous_replies %}\n"
+ "Previously you replied incorrectly this:\n"
+ "{% for reply in previous_replies %}\n"
+ " - {{ reply }}\n"
+ "{% endfor %}\n"
+ "{% endif %}\n"
+ "Question: {{ query }}"
+ )
+ router = ConditionalRouter(
+ routes=[
+ {
+ "condition": "{{ replies == ['Rome'] }}",
+ "output": "{{ replies }}",
+ "output_name": "correct_replies",
+ "output_type": List[int],
+ },
+ {
+ "condition": "{{ replies == ['Paris'] }}",
+ "output": "{{ replies }}",
+ "output_name": "incorrect_replies",
+ "output_type": List[int],
+ },
+ ]
+ )
+
+ pipe = Pipeline(max_runs_per_component=1)
+
+ pipe.add_component("prompt_builder", PromptBuilder(template=template))
+ pipe.add_component("generator", FakeGenerator())
+ pipe.add_component("router", router)
+
+ pipe.connect("prompt_builder.prompt", "generator.prompt")
+ pipe.connect("generator.replies", "router.replies")
+ pipe.connect("router.incorrect_replies", "prompt_builder.previous_replies")
+
+ return (
+ pipe,
+ [
+ PipelineRunData(
+ inputs={"prompt_builder": {"query": "What is the capital of " "Italy?"}},
+ expected_outputs={"router": {"correct_replies": ["Rome"]}},
+ expected_run_order=["prompt_builder", "generator", "router", "prompt_builder", "generator", "router"],
+ )
+ ],
+ )
+
+
@given(
    "a pipeline that has multiple branches that merge into a component with a single variadic input",
    target_fixture="pipeline_data",
)
def pipeline_that_has_multiple_branches_that_merge_into_a_component_with_a_single_variadic_input():
    """
    Builds a pipeline where a parity check routes a value down one of two
    branches, and several branch outputs all feed the single variadic input
    of a ``Sum`` component.
    """
    pipe = Pipeline(max_runs_per_component=1)
    for name, instance in (
        ("add_one", AddFixedValue()),
        ("parity", Remainder(divisor=2)),
        ("add_ten", AddFixedValue(add=10)),
        ("double", Double()),
        ("add_four", AddFixedValue(add=4)),
        ("add_one_again", AddFixedValue()),
        ("sum", Sum()),
    ):
        pipe.add_component(name, instance)

    for sender, receiver in (
        ("add_one.result", "parity.value"),
        ("parity.remainder_is_0", "add_ten.value"),
        ("parity.remainder_is_1", "double.value"),
        ("add_one.result", "sum.values"),
        ("add_ten.result", "sum.values"),
        ("double.value", "sum.values"),
        ("parity.remainder_is_1", "add_four.value"),
        ("add_four.result", "add_one_again.value"),
        ("add_one_again.result", "sum.values"),
    ):
        pipe.connect(sender, receiver)

    # One run per parity branch: even input goes through add_ten, odd input
    # through double/add_four/add_one_again.
    run_data = [
        PipelineRunData(
            inputs={"add_one": {"value": 1}},
            expected_outputs={"sum": {"total": 14}},
            expected_run_order=["add_one", "parity", "add_ten", "sum"],
        ),
        PipelineRunData(
            inputs={"add_one": {"value": 2}},
            expected_outputs={"sum": {"total": 17}},
            expected_run_order=["add_one", "parity", "double", "add_four", "add_one_again", "sum"],
        ),
    ]
    return pipe, run_data
+
+
@given(
    "a pipeline that has multiple branches of different lengths that merge into a component with a single variadic input",
    target_fixture="pipeline_data",
)
def pipeline_that_has_multiple_branches_of_different_lengths_that_merge_into_a_component_with_a_single_variadic_input():
    """
    Builds a pipeline where branches of different lengths all feed the single
    variadic input of a ``Sum`` component, whose total is post-processed by
    one more addition.
    """
    pipe = Pipeline(max_runs_per_component=1)
    for name, instance in (
        ("first_addition", AddFixedValue(add=2)),
        ("second_addition", AddFixedValue(add=2)),
        ("third_addition", AddFixedValue(add=2)),
        ("sum", Sum()),
        ("fourth_addition", AddFixedValue(add=1)),
    ):
        pipe.add_component(name, instance)

    for sender, receiver in (
        ("first_addition.result", "second_addition.value"),
        ("first_addition.result", "sum.values"),
        ("second_addition.result", "sum.values"),
        ("third_addition.result", "sum.values"),
        ("sum.total", "fourth_addition.value"),
    ):
        pipe.connect(sender, receiver)

    run_data = [
        PipelineRunData(
            inputs={"first_addition": {"value": 1}, "third_addition": {"value": 1}},
            expected_outputs={"fourth_addition": {"result": 12}},
            expected_run_order=["first_addition", "third_addition", "second_addition", "sum", "fourth_addition"],
        )
    ]
    return pipe, run_data
+
+
@given("a pipeline that is linear and returns intermediate outputs", target_fixture="pipeline_data")
def pipeline_that_is_linear_and_returns_intermediate_outputs():
    """
    Builds a three-step linear pipeline used to verify that
    ``include_outputs_from`` surfaces intermediate component outputs
    alongside the final result.
    """
    pipe = Pipeline(max_runs_per_component=1)
    pipe.add_component("first_addition", AddFixedValue(add=2))
    pipe.add_component("second_addition", AddFixedValue())
    pipe.add_component("double", Double())
    pipe.connect("first_addition", "double")
    pipe.connect("double", "second_addition")

    # First run asks for every intermediate output, second run for only one.
    run_data = [
        PipelineRunData(
            inputs={"first_addition": {"value": 1}},
            include_outputs_from={"second_addition", "double", "first_addition"},
            expected_outputs={
                "double": {"value": 6},
                "first_addition": {"result": 3},
                "second_addition": {"result": 7},
            },
            expected_run_order=["first_addition", "double", "second_addition"],
        ),
        PipelineRunData(
            inputs={"first_addition": {"value": 1}},
            include_outputs_from={"double"},
            expected_outputs={"double": {"value": 6}, "second_addition": {"result": 7}},
            expected_run_order=["first_addition", "double", "second_addition"],
        ),
    ]
    return pipe, run_data
+
+
@given("a pipeline that has a loop and returns intermediate outputs from it", target_fixture="pipeline_data")
def pipeline_that_has_a_loop_and_returns_intermediate_outputs_from_it():
    """
    Builds a pipeline containing a feedback loop (branch_joiner -> below_10 ->
    accumulator -> below_5 -> back to branch_joiner) and checks that
    intermediate outputs can be returned from components that run inside
    the loop.
    """
    pipe = Pipeline(max_runs_per_component=10)
    for name, instance in (
        ("add_one", AddFixedValue(add=1)),
        ("branch_joiner", BranchJoiner(type_=int)),
        ("below_10", Threshold(threshold=10)),
        ("below_5", Threshold(threshold=5)),
        ("add_three", AddFixedValue(add=3)),
        ("accumulator", Accumulate()),
        ("add_two", AddFixedValue(add=2)),
    ):
        pipe.add_component(name, instance)

    for sender, receiver in (
        ("add_one.result", "branch_joiner"),
        ("branch_joiner.value", "below_10.value"),
        ("below_10.below", "accumulator.value"),
        ("accumulator.value", "below_5.value"),
        ("below_5.above", "add_three.value"),
        ("below_5.below", "branch_joiner"),
        ("add_three.result", "branch_joiner"),
        ("below_10.above", "add_two.value"),
    ):
        pipe.connect(sender, receiver)

    # Intermediate outputs reflect the LAST execution of each looped component.
    run_data = [
        PipelineRunData(
            inputs={"add_one": {"value": 3}},
            include_outputs_from={
                "add_two",
                "add_one",
                "branch_joiner",
                "below_10",
                "accumulator",
                "below_5",
                "add_three",
            },
            expected_outputs={
                "add_two": {"result": 13},
                "add_one": {"result": 4},
                "branch_joiner": {"value": 11},
                "below_10": {"above": 11},
                "accumulator": {"value": 8},
                "below_5": {"above": 8},
                "add_three": {"result": 11},
            },
            expected_run_order=[
                "add_one",
                "branch_joiner",
                "below_10",
                "accumulator",
                "below_5",
                "branch_joiner",
                "below_10",
                "accumulator",
                "below_5",
                "add_three",
                "branch_joiner",
                "below_10",
                "add_two",
            ],
        )
    ]
    return pipe, run_data
+
+
@given(
    "a pipeline that is linear and returns intermediate outputs from multiple sockets", target_fixture="pipeline_data"
)
def pipeline_that_is_linear_and_returns_intermediate_outputs_from_multiple_sockets():
    """
    Builds a linear pipeline whose middle component exposes two output
    sockets; verifies that ``include_outputs_from`` collects values from
    both sockets, including the one not connected downstream.
    """

    @component
    class DoubleWithOriginal:
        """
        Doubles the input value and also exposes the untouched input on a
        second output socket.
        """

        @component.output_types(value=int, original=int)
        def run(self, value: int):
            doubled = value * 2
            return {"value": doubled, "original": value}

    pipe = Pipeline(max_runs_per_component=1)
    pipe.add_component("first_addition", AddFixedValue(add=2))
    pipe.add_component("second_addition", AddFixedValue())
    pipe.add_component("double", DoubleWithOriginal())
    pipe.connect("first_addition", "double")
    pipe.connect("double.value", "second_addition")

    run_data = [
        PipelineRunData(
            inputs={"first_addition": {"value": 1}},
            include_outputs_from={"second_addition", "double", "first_addition"},
            expected_outputs={
                "double": {"original": 3, "value": 6},
                "first_addition": {"result": 3},
                "second_addition": {"result": 7},
            },
            expected_run_order=["first_addition", "double", "second_addition"],
        ),
        PipelineRunData(
            inputs={"first_addition": {"value": 1}},
            include_outputs_from={"double"},
            expected_outputs={"double": {"original": 3, "value": 6}, "second_addition": {"result": 7}},
            expected_run_order=["first_addition", "double", "second_addition"],
        ),
    ]
    return pipe, run_data
+
+
@given(
    "a pipeline that has a component with default inputs that doesn't receive anything from its sender",
    target_fixture="pipeline_data",
)
def pipeline_that_has_a_component_with_default_inputs_that_doesnt_receive_anything_from_its_sender():
    """
    Builds a router -> prompt-builder pipeline where only one of the router's
    output sockets is connected; when the other route fires, the downstream
    prompt builder never runs.
    """
    language_routes = [
        {"condition": "{{'reisen' in sentence}}", "output": "German", "output_name": "language_1", "output_type": str},
        {"condition": "{{'viajar' in sentence}}", "output": "Spanish", "output_name": "language_2", "output_type": str},
    ]

    pipe = Pipeline(max_runs_per_component=1)
    pipe.add_component("router", ConditionalRouter(language_routes))
    pipe.add_component("pb", PromptBuilder(template="Ok, I know, that's {{language}}"))
    # Only the Spanish route feeds the prompt builder.
    pipe.connect("router.language_2", "pb.language")

    run_data = [
        PipelineRunData(
            inputs={"router": {"sentence": "Wir mussen reisen"}},
            expected_outputs={"router": {"language_1": "German"}},
            expected_run_order=["router"],
        ),
        PipelineRunData(
            inputs={"router": {"sentence": "Yo tengo que viajar"}},
            expected_outputs={"pb": {"prompt": "Ok, I know, that's Spanish"}},
            expected_run_order=["router", "pb"],
        ),
    ]
    return pipe, run_data
+
+
@given(
    "a pipeline that has a component with default inputs that doesn't receive anything from its sender but receives input from user",
    target_fixture="pipeline_data",
)
def pipeline_that_has_a_component_with_default_inputs_that_doesnt_receive_anything_from_its_sender_but_receives_input_from_user():
    """
    Builds a text-to-SQL pipeline with a fallback branch: a router sends the
    generated reply either to a SQL querier or, when the generator answered
    'no_answer', to a fallback prompt/LLM. The router's ``question`` input is
    supplied directly by the user instead of a connected sender.
    """
    prompt = PromptBuilder(
        template="""Please generate an SQL query. The query should answer the following Question: {{ question }};
            If the question cannot be answered given the provided table and columns, return 'no_answer'
            The query is to be answered for the table is called 'absenteeism' with the following
            Columns: {{ columns }};
            Answer:"""
    )

    @component
    class FakeGenerator:
        # Echoes a canned reply; mentions 'no_answer' only when the prompt does.
        @component.output_types(replies=List[str])
        def run(self, prompt: str):
            if "no_answer" in prompt:
                return {"replies": ["There's simply no_answer to this question"]}
            return {"replies": ["Some SQL query"]}

    @component
    class FakeSQLQuerier:
        # NOTE(review): also returns a "query" key not declared in
        # output_types — the expected outputs below rely on it; confirm the
        # component contract allows undeclared output keys.
        @component.output_types(results=str)
        def run(self, query: str):
            return {"results": "This is the query result", "query": query}

    llm = FakeGenerator()
    sql_querier = FakeSQLQuerier()

    routes = [
        {
            "condition": "{{'no_answer' not in replies[0]}}",
            "output": "{{replies[0]}}",
            "output_name": "sql",
            "output_type": str,
        },
        {
            "condition": "{{'no_answer' in replies[0]}}",
            "output": "{{question}}",
            "output_name": "go_to_fallback",
            "output_type": str,
        },
    ]

    router = ConditionalRouter(routes)

    fallback_prompt = PromptBuilder(
        template="""User entered a query that cannot be answered with the given table.
            The query was: {{ question }} and the table had columns: {{ columns }}.
            Let the user know why the question cannot be answered"""
    )
    fallback_llm = FakeGenerator()

    pipeline = Pipeline(max_runs_per_component=1)
    pipeline.add_component("prompt", prompt)
    pipeline.add_component("llm", llm)
    pipeline.add_component("router", router)
    pipeline.add_component("fallback_prompt", fallback_prompt)
    pipeline.add_component("fallback_llm", fallback_llm)
    pipeline.add_component("sql_querier", sql_querier)

    pipeline.connect("prompt", "llm")
    pipeline.connect("llm.replies", "router.replies")
    pipeline.connect("router.sql", "sql_querier.query")
    pipeline.connect("router.go_to_fallback", "fallback_prompt.question")
    pipeline.connect("fallback_prompt", "fallback_llm")

    columns = "Age, Absenteeism_time_in_hours, Days, Disciplinary_failure"
    # Bug fix: both scenarios belong in ONE run-data list. The fixture
    # contract used by every other scenario is (pipeline, [PipelineRunData...]);
    # the previous 3-tuple return left the second scenario outside that shape.
    return (
        pipeline,
        [
            PipelineRunData(
                inputs={
                    "prompt": {"question": "This is a question with no_answer", "columns": columns},
                    "router": {"question": "This is a question with no_answer"},
                },
                expected_outputs={"fallback_llm": {"replies": ["There's simply no_answer to this question"]}},
                expected_run_order=["prompt", "llm", "router", "fallback_prompt", "fallback_llm"],
            ),
            PipelineRunData(
                inputs={
                    "prompt": {"question": "This is a question that has an answer", "columns": columns},
                    "router": {"question": "This is a question that has an answer"},
                },
                expected_outputs={"sql_querier": {"results": "This is the query result", "query": "Some SQL query"}},
                expected_run_order=["prompt", "llm", "router", "sql_querier"],
            ),
        ],
    )
+
+
@given(
    "a pipeline that has a loop and a component with default inputs that doesn't receive anything from its sender but receives input from user",
    target_fixture="pipeline_data",
)
def pipeline_that_has_a_loop_and_a_component_with_default_inputs_that_doesnt_receive_anything_from_its_sender_but_receives_input_from_user():
    """
    Builds a self-correcting classification loop: a validator rejects the
    first LLM reply, which feeds the invalid reply and an error message back
    into the prompt builder for a second attempt that then succeeds.
    """
    template = """
    You are an experienced and accurate Turkish CX speacialist that classifies customer comments into pre-defined categories below:\n
    Negative experience labels:
    - Late delivery
    - Rotten/spoilt item
    - Bad Courier behavior

    Positive experience labels:
    - Good courier behavior
    - Thanks & appreciation
    - Love message to courier
    - Fast delivery
    - Quality of products

    Create a JSON object as a response. The fields are: 'positive_experience', 'negative_experience'.
    Assign at least one of the pre-defined labels to the given customer comment under positive and negative experience fields.
    If the comment has a positive experience, list the label under 'positive_experience' field.
    If the comments has a negative_experience, list it under the 'negative_experience' field.
    Here is the comment:\n{{ comment }}\n. Just return the category names in the list. If there aren't any, return an empty list.

    {% if invalid_replies and error_message %}
    You already created the following output in a previous attempt: {{ invalid_replies }}
    However, this doesn't comply with the format requirements from above and triggered this Python exception: {{ error_message }}
    Correct the output and try again. Just return the corrected output without any extra explanations.
    {% endif %}
    """

    @component
    class FakeOutputValidator:
        @component.output_types(
            valid_replies=List[str], invalid_replies=Optional[List[str]], error_message=Optional[str]
        )
        def run(self, replies: List[str]):
            # Reject the very first invocation to force one trip around the loop.
            first_call = not getattr(self, "called", False)
            self.called = True
            if first_call:
                return {"invalid_replies": ["This is an invalid reply"], "error_message": "this is an error message"}
            return {"valid_replies": replies}

    @component
    class FakeGenerator:
        @component.output_types(replies=List[str])
        def run(self, prompt: str):
            return {"replies": ["This is a valid reply"]}

    pipe = Pipeline(max_runs_per_component=1)
    pipe.add_component("prompt_builder", PromptBuilder(template=template))
    pipe.add_component("llm", FakeGenerator())
    pipe.add_component("output_validator", FakeOutputValidator())

    for sender, receiver in (
        ("prompt_builder.prompt", "llm.prompt"),
        ("llm.replies", "output_validator.replies"),
        ("output_validator.invalid_replies", "prompt_builder.invalid_replies"),
        ("output_validator.error_message", "prompt_builder.error_message"),
    ):
        pipe.connect(sender, receiver)

    comment = "I loved the quality of the meal but the courier was rude"
    run_data = [
        PipelineRunData(
            inputs={"prompt_builder": {"template_variables": {"comment": comment}}},
            expected_outputs={"output_validator": {"valid_replies": ["This is a valid reply"]}},
            expected_run_order=[
                "prompt_builder",
                "llm",
                "output_validator",
                "prompt_builder",
                "llm",
                "output_validator",
            ],
        )
    ]
    return pipe, run_data
+
+
@given(
    "a pipeline that has multiple components with only default inputs and are added in a different order from the order of execution",
    target_fixture="pipeline_data",
)
def pipeline_that_has_multiple_components_with_only_default_inputs_and_are_added_in_a_different_order_from_the_order_of_execution():
    """
    Builds a spellcheck -> retrieve -> rank -> generate pipeline whose fake
    components expose mostly optional/defaulted inputs, and registers the
    components in a different order than they execute (per the scenario name).
    """
    # Spellchecks the user's question.
    prompt_builder1 = PromptBuilder(
        template="""
    You are a spellchecking system. Check the given query and fill in the corrected query.

    Question: {{question}}
    Corrected question:
    """
    )
    # Builds the final RAG prompt from the ranked documents plus the question.
    prompt_builder2 = PromptBuilder(
        template="""
    According to these documents:

    {% for doc in documents %}
    {{ doc.content }}
    {% endfor %}

    Answer the given question: {{question}}
    Answer:
    """
    )
    # Flattens the spellchecker's replies into a plain-text query.
    prompt_builder3 = PromptBuilder(
        template="""
    {% for ans in replies %}
    {{ ans }}
    {% endfor %}
    """
    )

    @component
    class FakeRetriever:
        # Stand-in retriever: ignores filters/top_k/scale_score and returns one fixed document.
        @component.output_types(documents=List[Document])
        def run(
            self,
            query: str,
            filters: Optional[Dict[str, Any]] = None,
            top_k: Optional[int] = None,
            scale_score: Optional[bool] = None,
        ):
            return {"documents": [Document(content="This is a document")]}

    @component
    class FakeRanker:
        # Stand-in ranker: passes the documents through unchanged.
        @component.output_types(documents=List[Document])
        def run(
            self,
            query: str,
            documents: List[Document],
            top_k: Optional[int] = None,
            scale_score: Optional[bool] = None,
            calibration_factor: Optional[float] = None,
            score_threshold: Optional[float] = None,
        ):
            return {"documents": documents}

    @component
    class FakeGenerator:
        # Stand-in LLM: fixed reply plus fixed metadata.
        @component.output_types(replies=List[str], meta=Dict[str, Any])
        def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):
            return {"replies": ["This is a reply"], "meta": {"meta_key": "meta_value"}}

    pipeline = Pipeline(max_runs_per_component=1)
    # Components are deliberately added in an order that differs from the
    # expected_run_order below (that is the point of this scenario).
    pipeline.add_component(name="retriever", instance=FakeRetriever())
    pipeline.add_component(name="ranker", instance=FakeRanker())
    pipeline.add_component(name="prompt_builder2", instance=prompt_builder2)
    pipeline.add_component(name="prompt_builder1", instance=prompt_builder1)
    pipeline.add_component(name="prompt_builder3", instance=prompt_builder3)
    pipeline.add_component(name="llm", instance=FakeGenerator())
    pipeline.add_component(name="spellchecker", instance=FakeGenerator())

    pipeline.connect("prompt_builder1", "spellchecker")
    pipeline.connect("spellchecker.replies", "prompt_builder3")
    pipeline.connect("prompt_builder3", "retriever.query")
    pipeline.connect("prompt_builder3", "ranker.query")
    pipeline.connect("retriever.documents", "ranker.documents")
    pipeline.connect("ranker.documents", "prompt_builder2.documents")
    pipeline.connect("prompt_builder3", "prompt_builder2.question")
    pipeline.connect("prompt_builder2", "llm")

    return (
        pipeline,
        [
            PipelineRunData(
                inputs={"prompt_builder1": {"question": "Wha i Acromegaly?"}},
                # The spellchecker's unconsumed "meta" socket also surfaces in the outputs.
                expected_outputs={
                    "llm": {"replies": ["This is a reply"], "meta": {"meta_key": "meta_value"}},
                    "spellchecker": {"meta": {"meta_key": "meta_value"}},
                },
                expected_run_order=[
                    "prompt_builder1",
                    "spellchecker",
                    "prompt_builder3",
                    "retriever",
                    "ranker",
                    "prompt_builder2",
                    "llm",
                ],
            )
        ],
    )
+
+
@given("a pipeline that is linear with conditional branching and multiple joins", target_fixture="pipeline_data")
def that_is_linear_with_conditional_branching_and_multiple_joins():
    """
    Builds a hybrid-retrieval pipeline with an injection check: legit queries
    flow through embedding + BM25 retrieval, a hybrid join and a ranker, while
    suspected prompt injections are diverted to a retriever that returns no
    documents. Both paths meet in a final document join.
    """

    @component
    class FakeRouter:
        @component.output_types(LEGIT=str, INJECTION=str)
        def run(self, query: str):
            socket = "INJECTION" if "injection" in query else "LEGIT"
            return {socket: query}

    @component
    class FakeEmbedder:
        @component.output_types(embeddings=List[float])
        def run(self, text: str):
            return {"embeddings": [1.0, 2.0, 3.0]}

    @component
    class FakeRanker:
        @component.output_types(documents=List[Document])
        def run(self, query: str, documents: List[Document]):
            return {"documents": documents}

    @component
    class FakeRetriever:
        @component.output_types(documents=List[Document])
        def run(self, query: str):
            if "injection" in query:
                return {"documents": []}
            return {"documents": [Document(content="This is a document")]}

    @component
    class FakeEmbeddingRetriever:
        @component.output_types(documents=List[Document])
        def run(self, query_embedding: List[float]):
            return {"documents": [Document(content="This is another document")]}

    pipe = Pipeline()
    for name, instance in (
        ("router", FakeRouter()),
        ("text_embedder", FakeEmbedder()),
        ("retriever", FakeEmbeddingRetriever()),
        ("emptyretriever", FakeRetriever()),
        ("joinerfinal", DocumentJoiner()),
        ("joinerhybrid", DocumentJoiner()),
        ("ranker", FakeRanker()),
        ("bm25retriever", FakeRetriever()),
    ):
        pipe.add_component(name=name, instance=instance)

    for sender, receiver in (
        ("router.INJECTION", "emptyretriever.query"),
        ("router.LEGIT", "text_embedder.text"),
        ("text_embedder", "retriever.query_embedding"),
        ("router.LEGIT", "ranker.query"),
        ("router.LEGIT", "bm25retriever.query"),
        ("bm25retriever", "joinerhybrid.documents"),
        ("retriever", "joinerhybrid.documents"),
        ("joinerhybrid.documents", "ranker.documents"),
        ("ranker", "joinerfinal.documents"),
        ("emptyretriever", "joinerfinal.documents"),
    ):
        pipe.connect(sender, receiver)

    run_data = [
        PipelineRunData(
            inputs={"router": {"query": "I'm a legit question"}},
            expected_outputs={
                "joinerfinal": {
                    "documents": [
                        Document(content="This is a document"),
                        Document(content="This is another document"),
                    ]
                }
            },
            expected_run_order=[
                "router",
                "text_embedder",
                "bm25retriever",
                "retriever",
                "joinerhybrid",
                "ranker",
                "joinerfinal",
            ],
        ),
        PipelineRunData(
            inputs={"router": {"query": "I'm a nasty prompt injection"}},
            expected_outputs={"joinerfinal": {"documents": []}},
            expected_run_order=["router", "emptyretriever", "joinerfinal"],
        ),
    ]
    return pipe, run_data
+
+
@given("a pipeline that is a simple agent", target_fixture="pipeline_data")
def that_is_a_simple_agent():
    """
    Builds a minimal ReAct-style agent loop: an LLM alternates Thought/Action
    steps; a google_search action routes through a fake web search whose
    observation is appended to the prompt, and the loop repeats until the LLM
    emits a finish action, which ends the pipeline at the router.
    """
    search_message_template = """
    Given these web search results:

    {% for doc in documents %}
    {{ doc.content }}
    {% endfor %}

    Be as brief as possible, max one sentence.
    Answer the question: {{search_query}}
    """

    react_message_template = """
    Solve a question answering task with interleaving Thought, Action, Observation steps.

    Thought reasons about the current situation

    Action can be:
    google_search - Searches Google for the exact concept/entity (given in square brackets) and returns the results for you to use
    finish - Returns the final answer (given in square brackets) and finishes the task

    Observation summarizes the Action outcome and helps in formulating the next
    Thought in Thought, Action, Observation interleaving triplet of steps.

    After each Observation, provide the next Thought and next Action.
    Don't execute multiple steps even though you know the answer.
    Only generate Thought and Action, never Observation, you'll get Observation from Action.
    Follow the pattern in the example below.

    Example:
    ###########################
    Question: Which magazine was started first Arthur’s Magazine or First for Women?
    Thought: I need to search Arthur’s Magazine and First for Women, and find which was started
    first.
    Action: google_search[When was 'Arthur’s Magazine' started?]
    Observation: Arthur’s Magazine was an American literary periodical ˘
    published in Philadelphia and founded in 1844. Edited by Timothy Shay Arthur, it featured work by
    Edgar A. Poe, J.H. Ingraham, Sarah Josepha Hale, Thomas G. Spear, and others. In May 1846
    it was merged into Godey’s Lady’s Book.
    Thought: Arthur’s Magazine was started in 1844. I need to search First for Women founding date next
    Action: google_search[When was 'First for Women' magazine started?]
    Observation: First for Women is a woman’s magazine published by Bauer Media Group in the
    USA. The magazine was started in 1989. It is based in Englewood Cliffs, New Jersey. In 2011
    the circulation of the magazine was 1,310,696 copies.
    Thought: First for Women was started in 1989. 1844 (Arthur’s Magazine) ¡ 1989 (First for
    Women), so Arthur’s Magazine was started first.
    Action: finish[Arthur’s Magazine]
    ############################

    Let's start, the question is: {{query}}

    Thought:
    """

    routes = [
        {
            "condition": "{{'search' in tool_id_and_param[0]}}",
            "output": "{{tool_id_and_param[1]}}",
            "output_name": "search",
            "output_type": str,
        },
        {
            "condition": "{{'finish' in tool_id_and_param[0]}}",
            "output": "{{tool_id_and_param[1]}}",
            "output_name": "finish",
            "output_type": str,
        },
    ]

    @component
    class FakeThoughtActionOpenAIChatGenerator:
        # First call emits a google_search action, every later call emits finish.
        run_counter = 0

        @component.output_types(replies=List[ChatMessage])
        def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str, Any]] = None):
            if self.run_counter == 0:
                self.run_counter += 1
                return {
                    "replies": [
                        ChatMessage.from_assistant(
                            "thinking\n Action: google_search[What is taller, Eiffel Tower or Leaning Tower of Pisa]\n"
                        )
                    ]
                }

            return {"replies": [ChatMessage.from_assistant("thinking\n Action: finish[Eiffel Tower]\n")]}

    @component
    class FakeConclusionOpenAIChatGenerator:
        # Summarizes the (fake) search results in a single canned sentence.
        @component.output_types(replies=List[ChatMessage])
        def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str, Any]] = None):
            return {"replies": [ChatMessage.from_assistant("Tower of Pisa is 55 meters tall\n")]}

    @component
    class FakeSerperDevWebSearch:
        # Fixed two-document search result, regardless of the query.
        @component.output_types(documents=List[Document])
        def run(self, query: str):
            return {
                "documents": [
                    Document(content="Eiffel Tower is 300 meters tall"),
                    Document(content="Tower of Pisa is 55 meters tall"),
                ]
            }

    # main part
    pipeline = Pipeline()
    pipeline.add_component("main_input", BranchJoiner(List[ChatMessage]))
    pipeline.add_component("prompt_builder", ChatPromptBuilder(variables=["query"]))
    pipeline.add_component("llm", FakeThoughtActionOpenAIChatGenerator())

    @component
    class ToolExtractor:
        # Parses the last "Action: tool[param]" line out of the latest message.
        @component.output_types(output=List[str])
        def run(self, messages: List[ChatMessage]):
            prompt: str = messages[-1].content
            lines = prompt.strip().split("\n")
            for line in reversed(lines):
                pattern = r"Action:\s*(\w+)\[(.*?)\]"

                match = re.search(pattern, line)
                if match:
                    action_name = match.group(1)
                    parameter = match.group(2)
                    return {"output": [action_name, parameter]}
            return {"output": [None, None]}

    pipeline.add_component("tool_extractor", ToolExtractor())

    @component
    class PromptConcatenator:
        # Appends the latest reply (plus an optional suffix) to the current prompt.
        def __init__(self, suffix: str = ""):
            self._suffix = suffix

        @component.output_types(output=List[ChatMessage])
        def run(self, replies: List[ChatMessage], current_prompt: List[ChatMessage]):
            content = current_prompt[-1].content + replies[-1].content + self._suffix
            return {"output": [ChatMessage.from_user(content)]}

    @component
    class SearchOutputAdapter:
        # Wraps the search LLM's answer as an "Observation:" assistant message.
        @component.output_types(output=List[ChatMessage])
        def run(self, replies: List[ChatMessage]):
            content = f"Observation: {replies[-1].content}\n"
            return {"output": [ChatMessage.from_assistant(content)]}

    pipeline.add_component("prompt_concatenator_after_action", PromptConcatenator())

    pipeline.add_component("router", ConditionalRouter(routes))
    pipeline.add_component("router_search", FakeSerperDevWebSearch())
    pipeline.add_component("search_prompt_builder", ChatPromptBuilder(variables=["documents", "search_query"]))
    pipeline.add_component("search_llm", FakeConclusionOpenAIChatGenerator())

    pipeline.add_component("search_output_adapter", SearchOutputAdapter())
    pipeline.add_component("prompt_concatenator_after_observation", PromptConcatenator(suffix="\nThought: "))

    # main
    pipeline.connect("main_input", "prompt_builder.template")
    pipeline.connect("prompt_builder.prompt", "llm.messages")
    pipeline.connect("llm.replies", "prompt_concatenator_after_action.replies")

    # tools
    pipeline.connect("prompt_builder.prompt", "prompt_concatenator_after_action.current_prompt")
    pipeline.connect("prompt_concatenator_after_action", "tool_extractor.messages")

    pipeline.connect("tool_extractor", "router")
    pipeline.connect("router.search", "router_search.query")
    pipeline.connect("router_search.documents", "search_prompt_builder.documents")
    pipeline.connect("router.search", "search_prompt_builder.search_query")
    pipeline.connect("search_prompt_builder.prompt", "search_llm.messages")

    pipeline.connect("search_llm.replies", "search_output_adapter.replies")
    pipeline.connect("search_output_adapter", "prompt_concatenator_after_observation.replies")
    pipeline.connect("prompt_concatenator_after_action", "prompt_concatenator_after_observation.current_prompt")
    # Feeding back into main_input closes the agent loop.
    pipeline.connect("prompt_concatenator_after_observation", "main_input")

    search_message = [ChatMessage.from_user(search_message_template)]
    messages = [ChatMessage.from_user(react_message_template)]
    question = "which tower is taller: eiffel tower or tower of pisa?"

    # Expected run order covers exactly two trips through the loop: one
    # search action, then the finish action that stops at the router.
    return pipeline, [
        PipelineRunData(
            inputs={
                "main_input": {"value": messages},
                "prompt_builder": {"query": question},
                "search_prompt_builder": {"template": search_message},
            },
            expected_outputs={"router": {"finish": "Eiffel Tower"}},
            expected_run_order=[
                "main_input",
                "prompt_builder",
                "llm",
                "prompt_concatenator_after_action",
                "tool_extractor",
                "router",
                "router_search",
                "search_prompt_builder",
                "search_llm",
                "search_output_adapter",
                "prompt_concatenator_after_observation",
                "main_input",
                "prompt_builder",
                "llm",
                "prompt_concatenator_after_action",
                "tool_extractor",
                "router",
            ],
        )
    ]
+
+
@given("a pipeline that has a variadic component that receives partial inputs", target_fixture="pipeline_data")
def that_has_a_variadic_component_that_receives_partial_inputs():
    """
    Builds a pipeline where three conditional creators feed a variadic
    DocumentJoiner; in each run only two of the three actually emit documents,
    so the joiner must work with a partial set of its variadic inputs.
    """

    @component
    class ConditionalDocumentCreator:
        def __init__(self, content: str):
            self._content = content

        @component.output_types(documents=List[Document], noop=None)
        def run(self, create_document: bool = False):
            if not create_document:
                return {"noop": None}
            return {"documents": [Document(id=self._content, content=self._content)]}

    creators = (
        ("first_creator", "First document"),
        ("second_creator", "Second document"),
        ("third_creator", "Third document"),
    )

    pipe = Pipeline(max_runs_per_component=1)
    for name, text in creators:
        pipe.add_component(name, ConditionalDocumentCreator(content=text))
    pipe.add_component("documents_joiner", DocumentJoiner())
    for name, _ in creators:
        pipe.connect(f"{name}.documents", "documents_joiner.documents")

    run_data = [
        PipelineRunData(
            inputs={"first_creator": {"create_document": True}, "third_creator": {"create_document": True}},
            expected_outputs={
                "second_creator": {"noop": None},
                "documents_joiner": {
                    "documents": [
                        Document(id="First document", content="First document"),
                        Document(id="Third document", content="Third document"),
                    ]
                },
            },
            expected_run_order=["first_creator", "second_creator", "third_creator", "documents_joiner"],
        ),
        PipelineRunData(
            inputs={"first_creator": {"create_document": True}, "second_creator": {"create_document": True}},
            expected_outputs={
                "third_creator": {"noop": None},
                "documents_joiner": {
                    "documents": [
                        Document(id="First document", content="First document"),
                        Document(id="Second document", content="Second document"),
                    ]
                },
            },
            expected_run_order=["first_creator", "second_creator", "third_creator", "documents_joiner"],
        ),
    ]
    return pipe, run_data
+
+
@given("a pipeline that has an answer joiner variadic component", target_fixture="pipeline_data")
def that_has_an_answer_joiner_variadic_component():
    """
    Builds a pipeline where two AnswerBuilders feed the variadic input of an
    AnswerJoiner, which merges both answer lists into one.
    """
    query = "What's Natural Language Processing?"

    pipe = Pipeline(max_runs_per_component=1)
    pipe.add_component("answer_builder_1", AnswerBuilder())
    pipe.add_component("answer_builder_2", AnswerBuilder())
    pipe.add_component("answer_joiner", AnswerJoiner())
    pipe.connect("answer_builder_1.answers", "answer_joiner")
    pipe.connect("answer_builder_2.answers", "answer_joiner")

    expected_answers = [
        GeneratedAnswer(
            data="This is a test answer",
            query="What's Natural Language Processing?",
            documents=[],
            meta={},
        ),
        GeneratedAnswer(
            data="This is a second test answer",
            query="What's Natural Language Processing?",
            documents=[],
            meta={},
        ),
    ]
    run_data = [
        PipelineRunData(
            inputs={
                "answer_builder_1": {"query": query, "replies": ["This is a test answer"]},
                "answer_builder_2": {"query": query, "replies": ["This is a second test answer"]},
            },
            expected_outputs={"answer_joiner": {"answers": expected_answers}},
            expected_run_order=["answer_builder_1", "answer_builder_2", "answer_joiner"],
        )
    ]
    return pipe, run_data
+
+
+@given(
+ "a pipeline that is linear and a component in the middle receives optional input from other components and input from the user",
+ target_fixture="pipeline_data",
+)
+def that_is_linear_and_a_component_in_the_middle_receives_optional_input_from_other_components_and_input_from_the_user():
+ @component
+ class QueryMetadataExtractor:
+ @component.output_types(filters=Dict[str, str])
+ def run(self, prompt: str):
+ metadata = json.loads(prompt)
+ filters = []
+ for key, value in metadata.items():
+ filters.append({"field": f"meta.{key}", "operator": "==", "value": value})
+
+ return {"filters": {"operator": "AND", "conditions": filters}}
+
+ documents = [
+ Document(
+ content="some publication about Alzheimer prevention research done over 2023 patients study",
+ meta={"year": 2022, "disease": "Alzheimer", "author": "Michael Butter"},
+ id="doc1",
+ ),
+ Document(
+ content="some text about investigation and treatment of Alzheimer disease",
+ meta={"year": 2023, "disease": "Alzheimer", "author": "John Bread"},
+ id="doc2",
+ ),
+ Document(
+ content="A study on the effectiveness of new therapies for Parkinson's disease",
+ meta={"year": 2022, "disease": "Parkinson", "author": "Alice Smith"},
+ id="doc3",
+ ),
+ Document(
+ content="An overview of the latest research on the genetics of Parkinson's disease and its implications for treatment",
+ meta={"year": 2023, "disease": "Parkinson", "author": "David Jones"},
+ id="doc4",
+ ),
+ ]
+ document_store = InMemoryDocumentStore(bm25_algorithm="BM25Plus")
+ document_store.write_documents(documents=documents, policy=DuplicatePolicy.OVERWRITE)
+
+ pipeline = Pipeline()
+ pipeline.add_component(instance=PromptBuilder('{"disease": "Alzheimer", "year": 2023}'), name="builder")
+ pipeline.add_component(instance=QueryMetadataExtractor(), name="metadata_extractor")
+ pipeline.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="retriever")
+ pipeline.add_component(instance=DocumentJoiner(), name="document_joiner")
+
+ pipeline.connect("builder.prompt", "metadata_extractor.prompt")
+ pipeline.connect("metadata_extractor.filters", "retriever.filters")
+ pipeline.connect("retriever.documents", "document_joiner.documents")
+
+ query = "publications 2023 Alzheimer's disease"
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"retriever": {"query": query}},
+ expected_outputs={
+ "document_joiner": {
+ "documents": [
+ Document(
+ content="some text about investigation and treatment of Alzheimer disease",
+ meta={"year": 2023, "disease": "Alzheimer", "author": "John Bread"},
+ id="doc2",
+ score=3.324112496100923,
+ )
+ ]
+ }
+ },
+ expected_run_order=["builder", "metadata_extractor", "retriever", "document_joiner"],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has a cycle that would get it stuck", target_fixture="pipeline_data")
+def that_has_a_cycle_that_would_get_it_stuck():
+ template = """
+ You are an experienced and accurate Turkish CX specialist that classifies customer comments into pre-defined categories below:\n
+ Negative experience labels:
+ - Late delivery
+ - Rotten/spoilt item
+ - Bad Courier behavior
+
+ Positive experience labels:
+ - Good courier behavior
+ - Thanks & appreciation
+ - Love message to courier
+ - Fast delivery
+ - Quality of products
+
+ Create a JSON object as a response. The fields are: 'positive_experience', 'negative_experience'.
+ Assign at least one of the pre-defined labels to the given customer comment under positive and negative experience fields.
+ If the comment has a positive experience, list the label under 'positive_experience' field.
+ If the comment has a negative_experience, list it under the 'negative_experience' field.
+ Here is the comment:\n{{ comment }}\n. Just return the category names in the list. If there aren't any, return an empty list.
+
+ {% if invalid_replies and error_message %}
+ You already created the following output in a previous attempt: {{ invalid_replies }}
+ However, this doesn't comply with the format requirements from above and triggered this Python exception: {{ error_message }}
+ Correct the output and try again. Just return the corrected output without any extra explanations.
+ {% endif %}
+ """
+ prompt_builder = PromptBuilder(
+ template=template, required_variables=["comment", "invalid_replies", "error_message"]
+ )
+
+ @component
+ class FakeOutputValidator:
+ @component.output_types(
+ valid_replies=List[str], invalid_replies=Optional[List[str]], error_message=Optional[str]
+ )
+ def run(self, replies: List[str]):
+ if not getattr(self, "called", False):
+ self.called = True
+ return {"invalid_replies": ["This is an invalid reply"], "error_message": "this is an error message"}
+ return {"valid_replies": replies}
+
+ @component
+ class FakeGenerator:
+ @component.output_types(replies=List[str])
+ def run(self, prompt: str):
+ return {"replies": ["This is a valid reply"]}
+
+ llm = FakeGenerator()
+ validator = FakeOutputValidator()
+
+ pipeline = Pipeline(max_runs_per_component=1)
+ pipeline.add_component("prompt_builder", prompt_builder)
+
+ pipeline.add_component("llm", llm)
+ pipeline.add_component("output_validator", validator)
+
+ pipeline.connect("prompt_builder.prompt", "llm.prompt")
+ pipeline.connect("llm.replies", "output_validator.replies")
+ pipeline.connect("output_validator.invalid_replies", "prompt_builder.invalid_replies")
+
+ pipeline.connect("output_validator.error_message", "prompt_builder.error_message")
+
+ comment = "I loved the quality of the meal but the courier was rude"
+ return (pipeline, [PipelineRunData(inputs={"prompt_builder": {"comment": comment}})])
+
+
+@given("a pipeline that has a loop in the middle", target_fixture="pipeline_data")
+def that_has_a_loop_in_the_middle():
+ @component
+ class FakeGenerator:
+ @component.output_types(replies=List[str])
+ def run(self, prompt: str):
+ replies = []
+ if getattr(self, "first_run", True):
+ self.first_run = False
+ replies.append("No answer")
+ else:
+ replies.append("42")
+ return {"replies": replies}
+
+ @component
+ class PromptCleaner:
+ @component.output_types(clean_prompt=str)
+ def run(self, prompt: str):
+ return {"clean_prompt": prompt.strip()}
+
+ routes = [
+ {
+ "condition": "{{ 'No answer' in replies }}",
+ "output": "{{ replies }}",
+ "output_name": "invalid_replies",
+ "output_type": List[str],
+ },
+ {
+ "condition": "{{ 'No answer' not in replies }}",
+ "output": "{{ replies }}",
+ "output_name": "valid_replies",
+ "output_type": List[str],
+ },
+ ]
+
+ pipeline = Pipeline(max_runs_per_component=20)
+ pipeline.add_component("prompt_cleaner", PromptCleaner())
+ pipeline.add_component("prompt_builder", PromptBuilder(template="", variables=["question", "invalid_replies"]))
+ pipeline.add_component("llm", FakeGenerator())
+ pipeline.add_component("answer_validator", ConditionalRouter(routes=routes))
+ pipeline.add_component("answer_builder", AnswerBuilder())
+
+ pipeline.connect("prompt_cleaner.clean_prompt", "prompt_builder.template")
+ pipeline.connect("prompt_builder.prompt", "llm.prompt")
+ pipeline.connect("llm.replies", "answer_validator.replies")
+ pipeline.connect("answer_validator.invalid_replies", "prompt_builder.invalid_replies")
+ pipeline.connect("answer_validator.valid_replies", "answer_builder.replies")
+
+ question = "What is the answer?"
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={
+ "prompt_cleaner": {"prompt": "Random template"},
+ "prompt_builder": {"question": question},
+ "answer_builder": {"query": question},
+ },
+ expected_outputs={
+ "answer_builder": {"answers": [GeneratedAnswer(data="42", query=question, documents=[])]}
+ },
+ expected_run_order=[
+ "prompt_cleaner",
+ "prompt_builder",
+ "llm",
+ "answer_validator",
+ "prompt_builder",
+ "llm",
+ "answer_validator",
+ "answer_builder",
+ ],
+ )
+ ],
+ )
+
+
+@given("a pipeline that has variadic component that receives a conditional input", target_fixture="pipeline_data")
+def that_has_variadic_component_that_receives_a_conditional_input():
+ pipe = Pipeline(max_runs_per_component=1)
+ routes = [
+ {
+ "condition": "{{ documents|length > 1 }}",
+ "output": "{{ documents }}",
+ "output_name": "long",
+ "output_type": List[Document],
+ },
+ {
+ "condition": "{{ documents|length <= 1 }}",
+ "output": "{{ documents }}",
+ "output_name": "short",
+ "output_type": List[Document],
+ },
+ ]
+
+ @component
+ class NoOp:
+ @component.output_types(documents=List[Document])
+ def run(self, documents: List[Document]):
+ return {"documents": documents}
+
+ @component
+ class CommaSplitter:
+ @component.output_types(documents=List[Document])
+ def run(self, documents: List[Document]):
+ res = []
+ current_id = 0
+ for doc in documents:
+ for split in doc.content.split(","):
+ res.append(Document(content=split, id=str(current_id)))
+ current_id += 1
+ return {"documents": res}
+
+ pipe.add_component("conditional_router", ConditionalRouter(routes, unsafe=True))
+ pipe.add_component(
+ "empty_lines_cleaner", DocumentCleaner(remove_empty_lines=True, remove_extra_whitespaces=False, keep_id=True)
+ )
+ pipe.add_component("comma_splitter", CommaSplitter())
+ pipe.add_component("document_cleaner", DocumentCleaner(keep_id=True))
+ pipe.add_component("document_joiner", DocumentJoiner())
+
+ pipe.add_component("noop2", NoOp())
+ pipe.add_component("noop3", NoOp())
+
+ pipe.connect("noop2", "noop3")
+ pipe.connect("noop3", "conditional_router")
+
+ pipe.connect("conditional_router.long", "empty_lines_cleaner")
+ pipe.connect("empty_lines_cleaner", "document_joiner")
+
+ pipe.connect("comma_splitter", "document_cleaner")
+ pipe.connect("document_cleaner", "document_joiner")
+ pipe.connect("comma_splitter", "document_joiner")
+
+ document = Document(
+ id="1000", content="This document has so many, sentences. Like this one, or this one. Or even this other one."
+ )
+
+ return pipe, [
+ PipelineRunData(
+ inputs={"noop2": {"documents": [document]}, "comma_splitter": {"documents": [document]}},
+ expected_outputs={
+ "conditional_router": {
+ "short": [
+ Document(
+ id="1000",
+ content="This document has so many, sentences. Like this one, or this one. Or even this other one.",
+ )
+ ]
+ },
+ "document_joiner": {
+ "documents": [
+ Document(id="0", content="This document has so many"),
+ Document(id="1", content=" sentences. Like this one"),
+ Document(id="2", content=" or this one. Or even this other one."),
+ ]
+ },
+ },
+ expected_run_order=[
+ "comma_splitter",
+ "noop2",
+ "document_cleaner",
+ "noop3",
+ "conditional_router",
+ "document_joiner",
+ ],
+ ),
+ PipelineRunData(
+ inputs={
+ "noop2": {"documents": [document, document]},
+ "comma_splitter": {"documents": [document, document]},
+ },
+ expected_outputs={
+ "document_joiner": {
+ "documents": [
+ Document(id="0", content="This document has so many"),
+ Document(id="1", content=" sentences. Like this one"),
+ Document(id="2", content=" or this one. Or even this other one."),
+ Document(id="3", content="This document has so many"),
+ Document(id="4", content=" sentences. Like this one"),
+ Document(id="5", content=" or this one. Or even this other one."),
+ Document(
+ id="1000",
+ content="This document has so many, sentences. Like this one, or this one. Or even this other one.",
+ ),
+ ]
+ }
+ },
+ expected_run_order=[
+ "comma_splitter",
+ "noop2",
+ "document_cleaner",
+ "noop3",
+ "conditional_router",
+ "empty_lines_cleaner",
+ "document_joiner",
+ ],
+ ),
+ ]
+
+
+@given("a pipeline that has a string variadic component", target_fixture="pipeline_data")
+def that_has_a_string_variadic_component():
+ string_1 = "What's Natural Language Processing?"
+ string_2 = "What's is life?"
+
+ pipeline = Pipeline()
+ pipeline.add_component("prompt_builder_1", PromptBuilder("Builder 1: {{query}}"))
+ pipeline.add_component("prompt_builder_2", PromptBuilder("Builder 2: {{query}}"))
+ pipeline.add_component("string_joiner", StringJoiner())
+
+ pipeline.connect("prompt_builder_1.prompt", "string_joiner.strings")
+ pipeline.connect("prompt_builder_2.prompt", "string_joiner.strings")
+
+ return (
+ pipeline,
+ [
+ PipelineRunData(
+ inputs={"prompt_builder_1": {"query": string_1}, "prompt_builder_2": {"query": string_2}},
+ expected_outputs={
+ "string_joiner": {
+ "strings": ["Builder 1: What's Natural Language Processing?", "Builder 2: What's is life?"]
+ }
+ },
+ expected_run_order=["prompt_builder_1", "prompt_builder_2", "string_joiner"],
+ )
+ ],
+ )
diff --git a/testbed/deepset-ai__haystack/test/core/pipeline/test_draw.py b/testbed/deepset-ai__haystack/test/core/pipeline/test_draw.py
new file mode 100644
index 0000000000000000000000000000000000000000..f687f6c587c96c7170a978263fbeb99b428cf160
--- /dev/null
+++ b/testbed/deepset-ai__haystack/test/core/pipeline/test_draw.py
@@ -0,0 +1,96 @@
+# SPDX-FileCopyrightText: 2022-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+from unittest.mock import MagicMock, patch
+
+import pytest
+import requests
+
+from haystack.core.errors import PipelineDrawingError
+from haystack.core.pipeline import Pipeline
+from haystack.core.pipeline.draw import _to_mermaid_image, _to_mermaid_text
+from haystack.testing.sample_components import AddFixedValue, Double
+
+
+@pytest.mark.flaky(reruns=5, reruns_delay=5)
+@pytest.mark.integration
+def test_to_mermaid_image():
+ pipe = Pipeline()
+ pipe.add_component("comp1", Double())
+ pipe.add_component("comp2", Double())
+ pipe.connect("comp1", "comp2")
+
+ image_data = _to_mermaid_image(pipe.graph)
+ # We just verify we received some data as testing the actual image is not reliable
+ assert image_data
+
+
+@patch("haystack.core.pipeline.draw.requests")
+def test_to_mermaid_image_does_not_edit_graph(mock_requests):
+ pipe = Pipeline()
+ pipe.add_component("comp1", AddFixedValue(add=3))
+ pipe.add_component("comp2", Double())
+ pipe.connect("comp1.result", "comp2.value")
+ pipe.connect("comp2.value", "comp1.value")
+
+ mock_requests.get.return_value = MagicMock(status_code=200)
+ expected_pipe = pipe.to_dict()
+ _to_mermaid_image(pipe.graph)
+ assert expected_pipe == pipe.to_dict()
+
+
+def test_to_mermaid_image_failing_request(tmp_path):
+ pipe = Pipeline()
+ pipe.add_component("comp1", Double())
+ pipe.add_component("comp2", Double())
+ pipe.connect("comp1", "comp2")
+ pipe.connect("comp2", "comp1")
+
+ with patch("haystack.core.pipeline.draw.requests.get") as mock_get:
+
+ def raise_for_status(self):
+ raise requests.HTTPError()
+
+ mock_response = MagicMock()
+ mock_response.status_code = 429
+ mock_response.content = '{"error": "too many requests"}'
+ mock_response.raise_for_status = raise_for_status
+ mock_get.return_value = mock_response
+
+ with pytest.raises(PipelineDrawingError, match="There was an issue with https://mermaid.ink/"):
+ _to_mermaid_image(pipe.graph)
+
+
+def test_to_mermaid_text():
+ pipe = Pipeline()
+ pipe.add_component("comp1", AddFixedValue(add=3))
+ pipe.add_component("comp2", Double())
+ pipe.connect("comp1.result", "comp2.value")
+ pipe.connect("comp2.value", "comp1.value")
+
+ text = _to_mermaid_text(pipe.graph)
+ assert (
+ text
+ == """
+%%{ init: {'theme': 'neutral' } }%%
+
+graph TD;
+
+comp1["comp1 AddFixedValue
Optional inputs:
add (Optional[int])
"]:::component -- "result -> value int" --> comp2["comp2 Double"]:::component
+comp2["comp2 Double"]:::component -- "value -> value int" --> comp1["comp1 AddFixedValue
One of the most important things I didn't understand about the world
+when I was a child is the degree to which the returns for performance
+are superlinear.
Teachers and coaches implicitly told us the returns were linear.
+"You get out," I heard a thousand times, "what you put in." They
+meant well, but this is rarely true. If your product is only half
+as good as your competitor's, you don't get half as many customers.
+You get no customers, and you go out of business.
It's obviously true that the returns for performance are superlinear
+in business. Some think this is a flaw of capitalism, and that if
+we changed the rules it would stop being true. But superlinear
+returns for performance are a feature of the world, not an artifact
+of rules we've invented. We see the same pattern in fame, power,
+military victories, knowledge, and even benefit to humanity. In all
+of these, the rich get richer.
+[1]
You can't understand the world without understanding the concept
+of superlinear returns. And if you're ambitious you definitely
+should, because this will be the wave you surf on.
It may seem as if there are a lot of different situations with
+superlinear returns, but as far as I can tell they reduce to two
+fundamental causes: exponential growth and thresholds.
The most obvious case of superlinear returns is when you're working
+on something that grows exponentially. For example, growing bacterial
+cultures. When they grow at all, they grow exponentially. But they're
+tricky to grow. Which means the difference in outcome between someone
+who's adept at it and someone who's not is very great.
Startups can also grow exponentially, and we see the same pattern
+there. Some manage to achieve high growth rates. Most don't. And
+as a result you get qualitatively different outcomes: the companies
+with high growth rates tend to become immensely valuable, while the
+ones with lower growth rates may not even survive.
Y Combinator encourages founders to focus on growth rate rather
+than absolute numbers. It prevents them from being discouraged early
+on, when the absolute numbers are still low. It also helps them
+decide what to focus on: you can use growth rate as a compass to
+tell you how to evolve the company. But the main advantage is that
+by focusing on growth rate you tend to get something that grows
+exponentially.
YC doesn't explicitly tell founders that with growth rate "you get
+out what you put in," but it's not far from the truth. And if growth
+rate were proportional to performance, then the reward for performance
+p over time t would be proportional to pt.
Even after decades of thinking about this, I find that sentence
+startling.
Whenever how well you do depends on how well you've done, you'll
+get exponential growth. But neither our DNA nor our customs prepare
+us for it. No one finds exponential growth natural; every child is
+surprised, the first time they hear it, by the story of the man who
+asks the king for a single grain of rice the first day and double
+the amount each successive day.
What we don't understand naturally we develop customs to deal with,
+but we don't have many customs about exponential growth either,
+because there have been so few instances of it in human history.
+In principle herding should have been one: the more animals you
+had, the more offspring they'd have. But in practice grazing land
+was the limiting factor, and there was no plan for growing that
+exponentially.
Or more precisely, no generally applicable plan. There was a way
+to grow one's territory exponentially: by conquest. The more territory
+you control, the more powerful your army becomes, and the easier
+it is to conquer new territory. This is why history is full of
+empires. But so few people created or ran empires that their
+experiences didn't affect customs very much. The emperor was a
+remote and terrifying figure, not a source of lessons one could use
+in one's own life.
The most common case of exponential growth in preindustrial times
+was probably scholarship. The more you know, the easier it is to
+learn new things. The result, then as now, was that some people
+were startlingly more knowledgeable than the rest about certain
+topics. But this didn't affect customs much either. Although empires
+of ideas can overlap and there can thus be far more emperors, in
+preindustrial times this type of empire had little practical effect.
+[2]
That has changed in the last few centuries. Now the emperors of
+ideas can design bombs that defeat the emperors of territory. But
+this phenomenon is still so new that we haven't fully assimilated
+it. Few even of the participants realize they're benefitting from
+exponential growth or ask what they can learn from other instances
+of it.
The other source of superlinear returns is embodied in the expression
+"winner take all." In a sports match the relationship between
+performance and return is a step function: the winning team gets
+one win whether they do much better or just slightly better.
+[3]
The source of the step function is not competition per se, however.
+It's that there are thresholds in the outcome. You don't need
+competition to get those. There can be thresholds in situations
+where you're the only participant, like proving a theorem or hitting
+a target.
It's remarkable how often a situation with one source of superlinear
+returns also has the other. Crossing thresholds leads to exponential
+growth: the winning side in a battle usually suffers less damage,
+which makes them more likely to win in the future. And exponential
+growth helps you cross thresholds: in a market with network effects,
+a company that grows fast enough can shut out potential competitors.
Fame is an interesting example of a phenomenon that combines both
+sources of superlinear returns. Fame grows exponentially because
+existing fans bring you new ones. But the fundamental reason it's
+so concentrated is thresholds: there's only so much room on the
+A-list in the average person's head.
The most important case combining both sources of superlinear returns
+may be learning. Knowledge grows exponentially, but there are also
+thresholds in it. Learning to ride a bicycle, for example. Some of
+these thresholds are akin to machine tools: once you learn to read,
+you're able to learn anything else much faster. But the most important
+thresholds of all are those representing new discoveries. Knowledge
+seems to be fractal in the sense that if you push hard at the
+boundary of one area of knowledge, you sometimes discover a whole
+new field. And if you do, you get first crack at all the new
+discoveries to be made in it. Newton did this, and so did Durer and
+Darwin.
+Are there general rules for finding situations with superlinear
+returns? The most obvious one is to seek work that compounds.
There are two ways work can compound. It can compound directly, in
+the sense that doing well in one cycle causes you to do better in
+the next. That happens for example when you're building infrastructure,
+or growing an audience or brand. Or work can compound by teaching
+you, since learning compounds. This second case is an interesting
+one because you may feel you're doing badly as it's happening. You
+may be failing to achieve your immediate goal. But if you're learning
+a lot, then you're getting exponential growth nonetheless.
This is one reason Silicon Valley is so tolerant of failure. People
+in Silicon Valley aren't blindly tolerant of failure. They'll only
+continue to bet on you if you're learning from your failures. But
+if you are, you are in fact a good bet: maybe your company didn't
+grow the way you wanted, but you yourself have, and that should
+yield results eventually.
Indeed, the forms of exponential growth that don't consist of
+learning are so often intermixed with it that we should probably
+treat this as the rule rather than the exception. Which yields
+another heuristic: always be learning. If you're not learning,
+you're probably not on a path that leads to superlinear returns.
But don't overoptimize what you're learning. Don't limit yourself
+to learning things that are already known to be valuable. You're
+learning; you don't know for sure yet what's going to be valuable,
+and if you're too strict you'll lop off the outliers.
What about step functions? Are there also useful heuristics of the
+form "seek thresholds" or "seek competition?" Here the situation
+is trickier. The existence of a threshold doesn't guarantee the
+game will be worth playing. If you play a round of Russian roulette,
+you'll be in a situation with a threshold, certainly, but in the
+best case you're no better off. "Seek competition" is similarly
+useless; what if the prize isn't worth competing for? Sufficiently
+fast exponential growth guarantees both the shape and magnitude of
+the return curve — because something that grows fast enough will
+grow big even if it's trivially small at first — but thresholds
+only guarantee the shape.
+[4]
A principle for taking advantage of thresholds has to include a
+test to ensure the game is worth playing. Here's one that does: if
+you come across something that's mediocre yet still popular, it
+could be a good idea to replace it. For example, if a company makes
+a product that people dislike yet still buy, then presumably they'd
+buy a better alternative if you made one.
+[5]
It would be great if there were a way to find promising intellectual
+thresholds. Is there a way to tell which questions have whole new
+fields beyond them? I doubt we could ever predict this with certainty,
+but the prize is so valuable that it would be useful to have
+predictors that were even a little better than random, and there's
+hope of finding those. We can to some degree predict when a research
+problem isn't likely to lead to new discoveries: when it seems
+legit but boring. Whereas the kind that do lead to new discoveries
+tend to seem very mystifying, but perhaps unimportant. (If they
+were mystifying and obviously important, they'd be famous open
+questions with lots of people already working on them.) So one
+heuristic here is to be driven by curiosity rather than careerism
+— to give free rein to your curiosity instead of working on what
+you're supposed to.
+The prospect of superlinear returns for performance is an exciting
+one for the ambitious. And there's good news in this department:
+this territory is expanding in both directions. There are more types
+of work in which you can get superlinear returns, and the returns
+themselves are growing.
There are two reasons for this, though they're so closely intertwined
+that they're more like one and a half: progress in technology, and
+the decreasing importance of organizations.
Fifty years ago it used to be much more necessary to be part of an
+organization to work on ambitious projects. It was the only way to
+get the resources you needed, the only way to have colleagues, and
+the only way to get distribution. So in 1970 your prestige was in
+most cases the prestige of the organization you belonged to. And
+prestige was an accurate predictor, because if you weren't part of
+an organization, you weren't likely to achieve much. There were a
+handful of exceptions, most notably artists and writers, who worked
+alone using inexpensive tools and had their own brands. But even
+they were at the mercy of organizations for reaching audiences.
+[6]
A world dominated by organizations damped variation in the returns
+for performance. But this world has eroded significantly just in
+my lifetime. Now a lot more people can have the freedom that artists
+and writers had in the 20th century. There are lots of ambitious
+projects that don't require much initial funding, and lots of new
+ways to learn, make money, find colleagues, and reach audiences.
There's still plenty of the old world left, but the rate of change
+has been dramatic by historical standards. Especially considering
+what's at stake. It's hard to imagine a more fundamental change
+than one in the returns for performance.
Without the damping effect of institutions, there will be more
+variation in outcomes. Which doesn't imply everyone will be better
+off: people who do well will do even better, but those who do badly
+will do worse. That's an important point to bear in mind. Exposing
+oneself to superlinear returns is not for everyone. Most people
+will be better off as part of the pool. So who should shoot for
+superlinear returns? Ambitious people of two types: those who know
+they're so good that they'll be net ahead in a world with higher
+variation, and those, particularly the young, who can afford to
+risk trying it to find out.
+[7]
The switch away from institutions won't simply be an exodus of their
+current inhabitants. Many of the new winners will be people they'd
+never have let in. So the resulting democratization of opportunity
+will be both greater and more authentic than any tame intramural
+version the institutions themselves might have cooked up.
+Not everyone is happy about this great unlocking of ambition. It
+threatens some vested interests and contradicts some ideologies. [8]
+But if you're an ambitious individual it's good news for you.
+How should you take advantage of it?
The most obvious way to take advantage of superlinear returns for
+performance is by doing exceptionally good work. At the far end of
+the curve, incremental effort is a bargain. All the more so because
+there's less competition at the far end — and not just for the
+obvious reason that it's hard to do something exceptionally well,
+but also because people find the prospect so intimidating that few
+even try. Which means it's not just a bargain to do exceptional
+work, but a bargain even to try to.
There are many variables that affect how good your work is, and if
+you want to be an outlier you need to get nearly all of them right.
+For example, to do something exceptionally well, you have to be
+interested in it. Mere diligence is not enough. So in a world with
+superlinear returns, it's even more valuable to know what you're
+interested in, and to find ways to work on it.
+[9]
+It will also be
+important to choose work that suits your circumstances. For example,
+if there's a kind of work that inherently requires a huge expenditure
+of time and energy, it will be increasingly valuable to do it when
+you're young and don't yet have children.
There's a surprising amount of technique to doing great work.
+It's not just a matter of trying hard. I'm going to take a shot at
+giving a recipe in one paragraph.
Choose work you have a natural aptitude for and a deep interest in.
+Develop a habit of working on your own projects; it doesn't matter
+what they are so long as you find them excitingly ambitious. Work
+as hard as you can without burning out, and this will eventually
+bring you to one of the frontiers of knowledge. These look smooth
+from a distance, but up close they're full of gaps. Notice and
+explore such gaps, and if you're lucky one will expand into a whole
+new field. Take as much risk as you can afford; if you're not failing
+occasionally you're probably being too conservative. Seek out the
+best colleagues. Develop good taste and learn from the best examples.
+Be honest, especially with yourself. Exercise and eat and sleep
+well and avoid the more dangerous drugs. When in doubt, follow your
+curiosity. It never lies, and it knows more than you do about what's
+worth paying attention to.
+[10]
And there is of course one other thing you need: to be lucky. Luck
+is always a factor, but it's even more of a factor when you're
+working on your own rather than as part of an organization. And
+though there are some valid aphorisms about luck being where
+preparedness meets opportunity and so on, there's also a component
+of true chance that you can't do anything about. The solution is
+to take multiple shots. Which is another reason to start taking
+risks early.
+The best example of a field with superlinear returns is probably
+science. It has exponential growth, in the form of learning, combined
+with thresholds at the extreme edge of performance — literally at
+the limits of knowledge.
The result has been a level of inequality in scientific discovery
+that makes the wealth inequality of even the most stratified societies
+seem mild by comparison. Newton's discoveries were arguably greater
+than all his contemporaries' combined.
+[11]
This point may seem obvious, but it might be just as well to spell
+it out. Superlinear returns imply inequality. The steeper the return
+curve, the greater the variation in outcomes.
In fact, the correlation between superlinear returns and inequality
+is so strong that it yields another heuristic for finding work of
+this type: look for fields where a few big winners outperform
+everyone else. A kind of work where everyone does about the same
+is unlikely to be one with superlinear returns.
What are fields where a few big winners outperform everyone else?
+Here are some obvious ones: sports, politics, art, music, acting,
+directing, writing, math, science, starting companies, and investing.
+In sports the phenomenon is due to externally imposed thresholds;
+you only need to be a few percent faster to win every race. In
+politics, power grows much as it did in the days of emperors. And
+in some of the other fields (including politics) success is driven
+largely by fame, which has its own source of superlinear growth.
+But when we exclude sports and politics and the effects of fame, a
+remarkable pattern emerges: the remaining list is exactly the same
+as the list of fields where you have to be independent-minded to
+succeed — where your ideas have to be not just correct, but novel
+as well.
+[12]
This is obviously the case in science. You can't publish papers
+saying things that other people have already said. But it's just
+as true in investing, for example. It's only useful to believe that
+a company will do well if most other investors don't; if everyone
+else thinks the company will do well, then its stock price will
+already reflect that, and there's no room to make money.
What else can we learn from these fields? In all of them you have
+to put in the initial effort. Superlinear returns seem small at
+first. At this rate, you find yourself thinking, I'll never get
+anywhere. But because the reward curve rises so steeply at the far
+end, it's worth taking extraordinary measures to get there.
In the startup world, the name for this principle is "do things
+that don't scale." If you pay a ridiculous amount of attention to
+your tiny initial set of customers, ideally you'll kick off exponential
+growth by word of mouth. But this same principle applies to anything
+that grows exponentially. Learning, for example. When you first
+start learning something, you feel lost. But it's worth making the
+initial effort to get a toehold, because the more you learn, the
+easier it will get.
There's another more subtle lesson in the list of fields with
+superlinear returns: not to equate work with a job. For most of the
+20th century the two were identical for nearly everyone, and as a
+result we've inherited a custom that equates productivity with
+having a job. Even now to most people the phrase "your work" means
+their job. But to a writer or artist or scientist it means whatever
+they're currently studying or creating. For someone like that, their
+work is something they carry with them from job to job, if they
+have jobs at all. It may be done for an employer, but it's part of
+their portfolio.
+It's an intimidating prospect to enter a field where a few big
+winners outperform everyone else. Some people do this deliberately,
+but you don't need to. If you have sufficient natural ability and
+you follow your curiosity sufficiently far, you'll end up in one.
+Your curiosity won't let you be interested in boring questions, and
+interesting questions tend to create fields with superlinear returns
+if they're not already part of one.
The territory of superlinear returns is by no means static. Indeed,
+the most extreme returns come from expanding it. So while both
+ambition and curiosity can get you into this territory, curiosity
+may be the more powerful of the two. Ambition tends to make you
+climb existing peaks, but if you stick close enough to an interesting
+enough question, it may grow into a mountain beneath you.
Notes
There's a limit to how sharply you can distinguish between effort,
+performance, and return, because they're not sharply distinguished
+in fact. What counts as return to one person might be performance
+to another. But though the borders of these concepts are blurry,
+they're not meaningless. I've tried to write about them as precisely
+as I could without crossing into error.
[1]
+Evolution itself is probably the most pervasive example of
+superlinear returns for performance. But this is hard for us to
+empathize with because we're not the recipients; we're the returns.
[2]
+Knowledge did of course have a practical effect before the
+Industrial Revolution. The development of agriculture changed human
+life completely. But this kind of change was the result of broad,
+gradual improvements in technique, not the discoveries of a few
+exceptionally learned people.
[3]
+It's not mathematically correct to describe a step function as
+superlinear, but a step function starting from zero works like a
+superlinear function when it describes the reward curve for effort
+by a rational actor. If it starts at zero then the part before the
+step is below any linearly increasing return, and the part after
+the step must be above the necessary return at that point or no one
+would bother.
[4]
+Seeking competition could be a good heuristic in the sense that
+some people find it motivating. It's also somewhat of a guide to
+promising problems, because it's a sign that other people find them
+promising. But it's a very imperfect sign: often there's a clamoring
+crowd chasing some problem, and they all end up being trumped by
+someone quietly working on another one.
[5]
+Not always, though. You have to be careful with this rule. When
+something is popular despite being mediocre, there's often a hidden
+reason why. Perhaps monopoly or regulation make it hard to compete.
+Perhaps customers have bad taste or have broken procedures for
+deciding what to buy. There are huge swathes of mediocre things
+that exist for such reasons.
[6]
+In my twenties I wanted to be an artist
+and even went to art
+school to study painting. Mostly because I liked art, but a nontrivial
+part of my motivation came from the fact that artists seemed least
+at the mercy of organizations.
[7]
+In principle everyone is getting superlinear returns. Learning
+compounds, and everyone learns in the course of their life. But in
+practice few push this kind of everyday learning to the point where
+the return curve gets really steep.
[8]
+It's unclear exactly what advocates of "equity" mean by it.
+They seem to disagree among themselves. But whatever they mean is
+probably at odds with a world in which institutions have less power
+to control outcomes, and a handful of outliers do much better than
+everyone else.
It may seem like bad luck for this concept that it arose at just
+the moment when the world was shifting in the opposite direction,
+but I don't think this was a coincidence. I think one reason it
+arose now is because its adherents feel threatened by rapidly
+increasing variation in performance.
[9]
+Corollary: Parents who pressure their kids to work on something
+prestigious, like medicine, even though they have no interest in
+it, will be hosing them even more than they have in the past.
[10]
+The original version of this paragraph was the first draft of
+"How to Do Great Work."
+As soon as I wrote it I realized it was a more important topic than superlinear
+returns, so I paused the present essay to expand this paragraph into its
+own. Practically nothing remains of the original version, because
+after I finished "How to Do Great Work" I rewrote it based on that.
[11]
+Before the Industrial Revolution, people who got rich usually
+did it like emperors: capturing some resource made them more powerful
+and enabled them to capture more. Now it can be done like a scientist,
+by discovering or building something uniquely valuable. Most people
+who get rich use a mix of the old and the new ways, but in the most
+advanced economies the ratio has shifted dramatically toward discovery
+just in the last half century.
[12]
+It's not surprising that conventional-minded people would
+dislike inequality if independent-mindedness is one of the biggest
+drivers of it. But it's not simply that they don't want anyone to
+have what they can't. The conventional-minded literally can't imagine
+what it's like to have novel ideas. So the whole phenomenon of great
+variation in performance seems unnatural to them, and when they
+encounter it they assume it must be due to cheating or to some
+malign external influence.
Thanks
+to Trevor Blackwell, Patrick Collison, Tyler Cowen,
+Jessica Livingston, Harj Taggar, and Garry Tan for reading drafts
+of this.
Haystack is the open source Python framework by deepset for building custom apps with large language models (LLMs). It lets you quickly try out the latest models in natural language processing (NLP) while being flexible and easy to use. Our inspiring community of users and builders has helped shape Haystack into what it is today: a complete framework for building production-ready NLP apps.
+
Building with Haystack
+
Haystack offers comprehensive tooling for developing state-of-the-art NLP systems that use LLMs (such as GPT-4, Falcon and similar) and Transformer models. With Haystack, you can effortlessly experiment with various models hosted on platforms like Hugging Face, OpenAI, Cohere, or even models deployed on SageMaker and your local models to find the perfect fit for your use case.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Some examples of what you can build include:
+
+
Semantic search on a large collection of documents in any language
+
Generative question answering on a knowledge base containing mixed types of information: images, text, and tables.
+
Natural language chatbots powered by cutting-edge generative models like GPT-4
+
An LLM-based Haystack Agent capable of resolving complex queries
+
Information extraction from documents to populate your database or build a knowledge graph
+
+
This is just a small subset of the kinds of systems that can be created in Haystack.
+
Functionality for all stages of an NLP project
+
A successful NLP project requires more than just the language models. As an end-to-end framework, Haystack assists you in building your system every step of the way, offering tooling for each stage of the NLP project life cycle:
+
+
Effortless deployment of models from Hugging Face or other providers into your NLP pipeline
+Seamless integrations with your preferred document store (including many popular vector databases like Faiss, Pinecone, Qdrant, or Weaviate): keep your NLP-driven apps up-to-date with Haystack’s indexing pipelines that help you prepare and maintain your data
Specialized
+evaluation pipelines that use different metrics to evaluate the entire system or its individual components
+
+Haystack’s REST API to deploy your final system so that you can query it with a user-facing interface
+
+
But that’s not all:
+metadata filtering,
+model distillation, or the prompt hub, whatever your NLP heart desires, you’re likely to find it in Haystack. And if not? We’ll build it together.
Haystack uses a few simple but effective concepts to help you build fully functional and customized end-to-end NLP systems.
+
Components
+
At the core of Haystack are its components—fundamental building blocks that can perform tasks like document retrieval, text generation, or summarization. A single component is already quite powerful. It can manage local language models or communicate with a hosted model through an API.
+
While Haystack offers a bunch of components you can use out of the box, it also lets you create your own custom components. Explore the
+collection of integrations that includes custom components developed by our community, which you can freely use.
+
You can chain components together to build pipelines, which are the foundation of the NLP app architecture in Haystack.
+
Pipelines
+
Pipelines are powerful structures made up of components, such as a Retriever and Reader, connected to infrastructure building blocks, such as a DocumentStore (for example, Elasticsearch or Weaviate) to form complex systems.
+
Haystack offers ready-made pipelines for most common tasks, such as question answering, document retrieval, or summarization. But it’s just as easy to design and create a custom pipeline for NLP scenarios that are way more complex than question answering.
+
Agents
+
The Haystack Agent makes use of a large language model to resolve complex tasks. When initializing the Agent, you give it a set of tools, which can be pipeline components or whole pipelines. The Agent can use those tools iteratively to arrive at an answer. When given a query, the Agent determines which tools are useful to answer this query and calls them in a loop until it gets the answer. This way, it can achieve much more than extractive or generative question answering pipelines.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Who’s it for?
+
Haystack is for everyone looking to build natural language apps—NLP enthusiasts and newbies alike. You don’t need to understand how the models work under the hood. With Haystack’s modular and flexible components, pipelines, and agents, all you need is some basic knowledge of Python to dive right in.
+
Our community
+
At the heart of Haystack is the vibrant open source community that thrives on the diverse backgrounds and skill sets of its members. We value collaboration greatly and encourage our users to shape Haystack actively through GitHub contributions. Our Discord channel is a space where community members can connect, seek help, and learn from each other.
+
We also organize live online and in-person events, webinars, and office hours, which are an opportunity to learn and grow.