Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- testbed/deepset-ai__haystack/.gitignore +162 -0
- testbed/deepset-ai__haystack/.pre-commit-config.yaml +37 -0
- testbed/deepset-ai__haystack/CITATION.cff +30 -0
- testbed/deepset-ai__haystack/CONTRIBUTING.md +382 -0
- testbed/deepset-ai__haystack/LICENSE +202 -0
- testbed/deepset-ai__haystack/README.md +121 -0
- testbed/deepset-ai__haystack/SECURITY.md +26 -0
- testbed/deepset-ai__haystack/VERSION.txt +1 -0
- testbed/deepset-ai__haystack/code_of_conduct.txt +98 -0
- testbed/deepset-ai__haystack/docs/README.md +57 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/audio_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/builders_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/caching_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/classifiers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/connectors.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/converters_api.yml +42 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/data_classess_api.yml +28 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/document_stores_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/document_writers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/embedders_api.yml +37 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/evaluation_api.yml +31 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/evaluators_api.yml +38 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/extractors_api.yml +29 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/fetchers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/generators_api.yml +37 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/joiners_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/pipeline_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/preprocessors_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/rankers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/readers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/retrievers_api.yml +33 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/routers_api.yml +35 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/samplers_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/utils_api.yml +28 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/validators_api.yml +27 -0
- testbed/deepset-ai__haystack/docs/pydoc/config/websearch_api.yml +27 -0
- testbed/deepset-ai__haystack/examples/README.md +5 -0
- testbed/deepset-ai__haystack/license-header.txt +3 -0
- testbed/deepset-ai__haystack/licenserc.toml +23 -0
- testbed/deepset-ai__haystack/proposals/0000-template.md +64 -0
- testbed/deepset-ai__haystack/proposals/README.md +120 -0
- testbed/deepset-ai__haystack/proposals/text/2170-pydantic-dataclasses.md +80 -0
- testbed/deepset-ai__haystack/proposals/text/3550-csv-converter.md +86 -0
- testbed/deepset-ai__haystack/proposals/text/3558-embedding_retriever.md +131 -0
- testbed/deepset-ai__haystack/proposals/text/3665-prompt-node.md +445 -0
- testbed/deepset-ai__haystack/proposals/text/3695-demo-repository.md +77 -0
- testbed/deepset-ai__haystack/proposals/text/3784-shaper.md +272 -0
- testbed/deepset-ai__haystack/proposals/text/3875-table-cell.md +238 -0
- testbed/deepset-ai__haystack/proposals/text/3925-mrkl-agent.md +380 -0
- testbed/deepset-ai__haystack/proposals/text/3959-json-converter.md +142 -0
testbed/deepset-ai__haystack/.gitignore
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Local run files
|
| 2 |
+
qa.db
|
| 3 |
+
**/qa.db
|
| 4 |
+
**/*qa*.db
|
| 5 |
+
**/test-reports
|
| 6 |
+
|
| 7 |
+
# Byte-compiled / optimized / DLL files
|
| 8 |
+
__pycache__/
|
| 9 |
+
*.py[cod]
|
| 10 |
+
*$py.class
|
| 11 |
+
|
| 12 |
+
# C extensions
|
| 13 |
+
*.so
|
| 14 |
+
|
| 15 |
+
# Distribution / packaging
|
| 16 |
+
.Python
|
| 17 |
+
build/
|
| 18 |
+
develop-eggs/
|
| 19 |
+
dist/
|
| 20 |
+
downloads/
|
| 21 |
+
eggs/
|
| 22 |
+
.eggs/
|
| 23 |
+
lib/
|
| 24 |
+
lib64/
|
| 25 |
+
parts/
|
| 26 |
+
sdist/
|
| 27 |
+
var/
|
| 28 |
+
wheels/
|
| 29 |
+
pip-wheel-metadata/
|
| 30 |
+
share/python-wheels/
|
| 31 |
+
*.egg-info/
|
| 32 |
+
.installed.cfg
|
| 33 |
+
*.egg
|
| 34 |
+
MANIFEST
|
| 35 |
+
|
| 36 |
+
# PyInstaller
|
| 37 |
+
# Usually these files are written by a python script from a template
|
| 38 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 39 |
+
*.manifest
|
| 40 |
+
*.spec
|
| 41 |
+
|
| 42 |
+
# Installer logs
|
| 43 |
+
pip-log.txt
|
| 44 |
+
pip-delete-this-directory.txt
|
| 45 |
+
|
| 46 |
+
# Unit test / coverage reports
|
| 47 |
+
htmlcov/
|
| 48 |
+
.tox/
|
| 49 |
+
.nox/
|
| 50 |
+
.coverage
|
| 51 |
+
.coverage.*
|
| 52 |
+
.cache
|
| 53 |
+
nosetests.xml
|
| 54 |
+
coverage.xml
|
| 55 |
+
*.cover
|
| 56 |
+
*.py,cover
|
| 57 |
+
.hypothesis/
|
| 58 |
+
.pytest_cache/
|
| 59 |
+
|
| 60 |
+
# Translations
|
| 61 |
+
*.mo
|
| 62 |
+
*.pot
|
| 63 |
+
|
| 64 |
+
# Django stuff:
|
| 65 |
+
*.log
|
| 66 |
+
local_settings.py
|
| 67 |
+
db.sqlite3
|
| 68 |
+
db.sqlite3-journal
|
| 69 |
+
|
| 70 |
+
# Flask stuff:
|
| 71 |
+
instance/
|
| 72 |
+
.webassets-cache
|
| 73 |
+
|
| 74 |
+
# Scrapy stuff:
|
| 75 |
+
.scrapy
|
| 76 |
+
|
| 77 |
+
# documentation
|
| 78 |
+
docs/pydoc/temp/
|
| 79 |
+
|
| 80 |
+
# PyBuilder
|
| 81 |
+
target/
|
| 82 |
+
|
| 83 |
+
# Jupyter Notebook
|
| 84 |
+
.ipynb_checkpoints
|
| 85 |
+
|
| 86 |
+
# IPython
|
| 87 |
+
profile_default/
|
| 88 |
+
ipython_config.py
|
| 89 |
+
|
| 90 |
+
# pyenv
|
| 91 |
+
.python-version
|
| 92 |
+
|
| 93 |
+
# pipenv
|
| 94 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 95 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 96 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 97 |
+
# install all needed dependencies.
|
| 98 |
+
#Pipfile.lock
|
| 99 |
+
|
| 100 |
+
# pyflow
|
| 101 |
+
__pypackages__/
|
| 102 |
+
|
| 103 |
+
# Celery stuff
|
| 104 |
+
celerybeat-schedule
|
| 105 |
+
celerybeat.pid
|
| 106 |
+
|
| 107 |
+
# SageMath parsed files
|
| 108 |
+
*.sage.py
|
| 109 |
+
|
| 110 |
+
# Environments
|
| 111 |
+
.env
|
| 112 |
+
.venv
|
| 113 |
+
env/
|
| 114 |
+
venv/
|
| 115 |
+
ENV/
|
| 116 |
+
env.bak/
|
| 117 |
+
venv.bak/
|
| 118 |
+
|
| 119 |
+
# Spyder project settings
|
| 120 |
+
.spyderproject
|
| 121 |
+
.spyproject
|
| 122 |
+
|
| 123 |
+
# Rope project settings
|
| 124 |
+
.ropeproject
|
| 125 |
+
|
| 126 |
+
# mkdocs documentation
|
| 127 |
+
/site
|
| 128 |
+
|
| 129 |
+
# mypy
|
| 130 |
+
.mypy_cache/
|
| 131 |
+
.dmypy.json
|
| 132 |
+
dmypy.json
|
| 133 |
+
|
| 134 |
+
# Pyre type checker
|
| 135 |
+
.pyre/
|
| 136 |
+
|
| 137 |
+
# PyCharm
|
| 138 |
+
.idea
|
| 139 |
+
|
| 140 |
+
# VSCode
|
| 141 |
+
.vscode
|
| 142 |
+
|
| 143 |
+
# haystack files
|
| 144 |
+
haystack/document_store/qa.db
|
| 145 |
+
data
|
| 146 |
+
**/mlruns/**
|
| 147 |
+
src
|
| 148 |
+
models
|
| 149 |
+
saved_models
|
| 150 |
+
*_build
|
| 151 |
+
rest_api/file-upload/*
|
| 152 |
+
**/feedback_squad_direct.json
|
| 153 |
+
haystack/json-schemas
|
| 154 |
+
.haystack_debug
|
| 155 |
+
|
| 156 |
+
.DS_Store
|
| 157 |
+
|
| 158 |
+
# http cache (requests-cache)
|
| 159 |
+
**/http_cache.sqlite
|
| 160 |
+
|
| 161 |
+
# ruff
|
| 162 |
+
.ruff_cache
|
testbed/deepset-ai__haystack/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fail_fast: true
|
| 2 |
+
|
| 3 |
+
repos:
|
| 4 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
| 5 |
+
rev: v4.6.0
|
| 6 |
+
hooks:
|
| 7 |
+
- id: check-ast # checks Python syntax
|
| 8 |
+
- id: check-json # checks JSON syntax
|
| 9 |
+
- id: check-merge-conflict # checks for no merge conflict strings
|
| 10 |
+
- id: check-shebang-scripts-are-executable # checks that scripts with a shebang have executable permissions
|
| 11 |
+
- id: check-toml # checks TOML syntax
|
| 12 |
+
- id: check-yaml # checks YAML syntax
|
| 13 |
+
- id: end-of-file-fixer # checks there is a newline at the end of the file
|
| 14 |
+
- id: mixed-line-ending # normalizes line endings
|
| 15 |
+
- id: no-commit-to-branch # prevents committing to main
|
| 16 |
+
- id: trailing-whitespace # trims trailing whitespace
|
| 17 |
+
args: [--markdown-linebreak-ext=md]
|
| 18 |
+
|
| 19 |
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
| 20 |
+
rev: v0.5.0
|
| 21 |
+
hooks:
|
| 22 |
+
- id: ruff
|
| 23 |
+
- id: ruff-format
|
| 24 |
+
|
| 25 |
+
- repo: https://github.com/codespell-project/codespell
|
| 26 |
+
rev: v2.3.0
|
| 27 |
+
hooks:
|
| 28 |
+
- id: codespell
|
| 29 |
+
args: ["--toml", "pyproject.toml"]
|
| 30 |
+
additional_dependencies:
|
| 31 |
+
- tomli
|
| 32 |
+
|
| 33 |
+
- repo: https://github.com/rhysd/actionlint
|
| 34 |
+
rev: v1.7.1
|
| 35 |
+
hooks:
|
| 36 |
+
- id: actionlint-docker
|
| 37 |
+
args: ["-ignore", "SC2102"]
|
testbed/deepset-ai__haystack/CITATION.cff
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cff-version: 1.2.0
|
| 2 |
+
message: "If you use this software, please cite it using these metadata."
|
| 3 |
+
title: "Haystack: the end-to-end NLP framework for pragmatic builders"
|
| 4 |
+
date-released: 2019-11-14
|
| 5 |
+
url: "https://github.com/deepset-ai/haystack"
|
| 6 |
+
authors:
|
| 7 |
+
- family-names: Pietsch
|
| 8 |
+
given-names: Malte
|
| 9 |
+
- family-names: Möller
|
| 10 |
+
given-names: Timo
|
| 11 |
+
- family-names: Kostic
|
| 12 |
+
given-names: Bogdan
|
| 13 |
+
- family-names: Risch
|
| 14 |
+
given-names: Julian
|
| 15 |
+
- family-names: Pippi
|
| 16 |
+
given-names: Massimiliano
|
| 17 |
+
- family-names: Jobanputra
|
| 18 |
+
given-names: Mayank
|
| 19 |
+
- family-names: Zanzottera
|
| 20 |
+
given-names: Sara
|
| 21 |
+
- family-names: Cerza
|
| 22 |
+
given-names: Silvano
|
| 23 |
+
- family-names: Blagojevic
|
| 24 |
+
given-names: Vladimir
|
| 25 |
+
- family-names: Stadelmann
|
| 26 |
+
given-names: Thomas
|
| 27 |
+
- family-names: Soni
|
| 28 |
+
given-names: Tanay
|
| 29 |
+
- family-names: Lee
|
| 30 |
+
given-names: Sebastian
|
testbed/deepset-ai__haystack/CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributing to Haystack
|
| 2 |
+
|
| 3 |
+
First off, thanks for taking the time to contribute! :blue_heart:
|
| 4 |
+
|
| 5 |
+
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents)
|
| 6 |
+
for different ways to help and details about how this project handles them. Please make sure to read
|
| 7 |
+
the relevant section before making your contribution. It will make it a lot easier for us maintainers
|
| 8 |
+
and smooth out the experience for all involved. The community looks forward to your contributions!
|
| 9 |
+
|
| 10 |
+
> [!TIP]
|
| 11 |
+
> If you like Haystack but just don't have time to contribute, that's fine. There are other easy ways to support the
|
| 12 |
+
> project and show your appreciation, which we would also be very happy about:
|
| 13 |
+
> - Star this repository
|
| 14 |
+
> - Tweet about it
|
| 15 |
+
> - Mention Haystack at local meetups and tell your friends/colleagues
|
| 16 |
+
|
| 17 |
+
**Table of Contents**
|
| 18 |
+
|
| 19 |
+
- [Contributing to Haystack](#contributing-to-haystack)
|
| 20 |
+
- [Code of Conduct](#code-of-conduct)
|
| 21 |
+
- [I Have a Question](#i-have-a-question)
|
| 22 |
+
- [Reporting Bugs](#reporting-bugs)
|
| 23 |
+
- [Before Submitting a Bug Report](#before-submitting-a-bug-report)
|
| 24 |
+
- [How Do I Submit a Good Bug Report?](#how-do-i-submit-a-good-bug-report)
|
| 25 |
+
- [Suggesting Enhancements](#suggesting-enhancements)
|
| 26 |
+
- [Before Submitting an Enhancement](#before-submitting-an-enhancement)
|
| 27 |
+
- [How Do I Submit a Good Enhancement Suggestion?](#how-do-i-submit-a-good-enhancement-suggestion)
|
| 28 |
+
- [Contribute code](#contribute-code)
|
| 29 |
+
- [Where to start](#where-to-start)
|
| 30 |
+
- [Setting up your development environment](#setting-up-your-development-environment)
|
| 31 |
+
- [Clone the git repository](#clone-the-git-repository)
|
| 32 |
+
- [Run the tests locally](#run-the-tests-locally)
|
| 33 |
+
- [Requirements for Pull Requests](#requirements-for-pull-requests)
|
| 34 |
+
- [Release notes](#release-notes)
|
| 35 |
+
- [CI (Continuous Integration)](#ci-continuous-integration)
|
| 36 |
+
- [Working from GitHub forks](#working-from-github-forks)
|
| 37 |
+
- [Writing tests](#writing-tests)
|
| 38 |
+
- [Unit test](#unit-test)
|
| 39 |
+
- [Integration test](#integration-test)
|
| 40 |
+
- [End to End (e2e) test](#end-to-end-e2e-test)
|
| 41 |
+
- [Contributor Licence Agreement (CLA)](#contributor-licence-agreement-cla)
|
| 42 |
+
|
| 43 |
+
## Code of Conduct
|
| 44 |
+
|
| 45 |
+
This project and everyone participating in it is governed by our [Code of Conduct](code_of_conduct.txt).
|
| 46 |
+
By participating, you are expected to uphold this code. Please report unacceptable behavior to haystack@deepset.ai.
|
| 47 |
+
|
| 48 |
+
## I Have a Question
|
| 49 |
+
|
| 50 |
+
> [!TIP]
|
| 51 |
+
> If you want to ask a question, we assume that you have read the available [Documentation](https://docs.haystack.deepset.ai/docs/intro).
|
| 52 |
+
|
| 53 |
+
Before you ask a question, it is best to search for existing [Issues](/issues) that might help you. In case you have
|
| 54 |
+
found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to
|
| 55 |
+
search the internet for answers first.
|
| 56 |
+
|
| 57 |
+
If you then still feel the need to ask a question and need clarification, you can use one of our
|
| 58 |
+
[Community Channels](https://haystack.deepset.ai/community), Discord, in particular, is often very helpful.
|
| 59 |
+
|
| 60 |
+
## Reporting Bugs
|
| 61 |
+
|
| 62 |
+
### Before Submitting a Bug Report
|
| 63 |
+
|
| 64 |
+
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to
|
| 65 |
+
investigate carefully, collect information, and describe the issue in detail in your report. Please complete the
|
| 66 |
+
following steps in advance to help us fix any potential bug as fast as possible.
|
| 67 |
+
|
| 68 |
+
- Make sure that you are using the latest version.
|
| 69 |
+
- Determine if your bug is really a bug and not an error on your side, for example using incompatible versions.
|
| 70 |
+
Make sure that you have read the [documentation](https://docs.haystack.deepset.ai/docs/intro). If you are looking
|
| 71 |
+
for support, you might want to check [this section](#i-have-a-question).
|
| 72 |
+
- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there
|
| 73 |
+
is not already a bug report existing for your bug or error in the [bug tracker](/issues).
|
| 74 |
+
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have
|
| 75 |
+
discussed the issue.
|
| 76 |
+
- Collect information about the bug:
|
| 77 |
+
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
|
| 78 |
+
- Version of Haystack and the integrations you're using
|
| 79 |
+
- Possibly your input and the output
|
| 80 |
+
- If you can reliably reproduce the issue, a snippet of code we can use
|
| 81 |
+
|
| 82 |
+
### How Do I Submit a Good Bug Report?
|
| 83 |
+
|
| 84 |
+
> [!IMPORTANT]
|
| 85 |
+
> You must never report security-related issues, vulnerabilities, or bugs, including sensitive information to the issue
|
| 86 |
+
> tracker, or elsewhere in public. Instead, sensitive bugs must be reported using [this link](https://github.com/deepset-ai/haystack/security/advisories/new).
|
| 87 |
+
>
|
| 88 |
+
|
| 89 |
+
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
|
| 90 |
+
|
| 91 |
+
- Open an [Issue of type Bug Report](/issues/new?assignees=&labels=bug&projects=&template=bug_report.md&title=).
|
| 92 |
+
- Explain the behavior you would expect and the actual behavior.
|
| 93 |
+
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to
|
| 94 |
+
recreate the issue on their own. This usually includes your code. For good bug reports, you should isolate the problem
|
| 95 |
+
and create a reduced test case.
|
| 96 |
+
- Provide the information you collected in the previous section.
|
| 97 |
+
|
| 98 |
+
Once it's filed:
|
| 99 |
+
|
| 100 |
+
- The project team will label the issue accordingly.
|
| 101 |
+
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no
|
| 102 |
+
obvious way to reproduce the issue, the team will ask you for those steps.
|
| 103 |
+
- If the team is able to reproduce the issue, the issue will be scheduled for a fix or left to be
|
| 104 |
+
[implemented by someone](#your-first-code-contribution).
|
| 105 |
+
|
| 106 |
+
## Suggesting Enhancements
|
| 107 |
+
|
| 108 |
+
This section guides you through submitting an enhancement suggestion, including new integrations and improvements
|
| 109 |
+
to existing ones. Following these guidelines will help maintainers and the community to understand your suggestion and
|
| 110 |
+
find related suggestions.
|
| 111 |
+
|
| 112 |
+
### Before Submitting an Enhancement
|
| 113 |
+
|
| 114 |
+
- Make sure that you are using the latest version.
|
| 115 |
+
- Read the [documentation](https://docs.haystack.deepset.ai/docs/intro) carefully and find out if the functionality
|
| 116 |
+
is already covered, maybe by an individual configuration.
|
| 117 |
+
- Perform a [search](/issues) to see if the enhancement has already been suggested. If it has, add a comment to the
|
| 118 |
+
existing issue instead of opening a new one.
|
| 119 |
+
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to
|
| 120 |
+
convince the project's developers of the merits of this feature. Keep in mind that we want features that will be
|
| 121 |
+
useful to the majority of our users and not just a small subset. If you're just targeting a minority of users,
|
| 122 |
+
consider writing and distributing the integration on your own.
|
| 123 |
+
|
| 124 |
+
### How Do I Submit a Good Enhancement Suggestion?
|
| 125 |
+
|
| 126 |
+
Enhancement suggestions are tracked as GitHub issues of type [Feature request for existing integrations](/issues/new?assignees=&labels=feature+request&projects=&template=feature-request-for-existing-integrations.md&title=).
|
| 127 |
+
|
| 128 |
+
- Use a **clear and descriptive title** for the issue to identify the suggestion.
|
| 129 |
+
- Fill the issue following the template
|
| 130 |
+
|
| 131 |
+
## Contribute code
|
| 132 |
+
|
| 133 |
+
> [!IMPORTANT]
|
| 134 |
+
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the
|
| 135 |
+
> necessary rights to the content and that the content you contribute may be provided under the project license.
|
| 136 |
+
|
| 137 |
+
### Where to start
|
| 138 |
+
|
| 139 |
+
If this is your first contribution, a good starting point is looking for an open issue that's marked with the label
|
| 140 |
+
["good first issue"](https://github.com/deepset-ai/haystack/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
|
| 141 |
+
The core contributors periodically mark certain issues as good for first-time contributors. Those issues are usually
|
| 142 |
+
limited in scope, easily fixable and low priority, so there is absolutely no reason why you should not try fixing them,
|
| 143 |
+
it's a good excuse to start looking into the project and a safe space for experimenting failure: if you don't get the
|
| 144 |
+
grasp of something, pick another one! Once you become comfortable contributing to Haystack, you can have a look at the
|
| 145 |
+
list of issues marked as [contributions wanted](https://github.com/orgs/deepset-ai/projects/14/views/1) to look for your
|
| 146 |
+
next contribution!
|
| 147 |
+
|
| 148 |
+
### Setting up your development environment
|
| 149 |
+
|
| 150 |
+
Haystack makes heavy use of [Hatch](https://hatch.pypa.io/latest/), a Python project manager that we use to set up the
|
| 151 |
+
virtual environments, build the project, and publish packages. As you can imagine, the first step towards becoming a
|
| 152 |
+
Haystack contributor is installing Hatch. There are a variety of installation methods depending on your operating system
|
| 153 |
+
platform, version, and personal taste: please have a look at [this page](https://hatch.pypa.io/latest/install/#installation)
|
| 154 |
+
and keep reading once you can run from your terminal:
|
| 155 |
+
|
| 156 |
+
```console
|
| 157 |
+
$ hatch --version
|
| 158 |
+
Hatch, version 1.9.3
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
You can create a new virtual environment for Haystack with `hatch` by running:
|
| 162 |
+
|
| 163 |
+
```console
|
| 164 |
+
$ hatch shell
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
### Clone the git repository
|
| 168 |
+
|
| 169 |
+
You won't be able to make changes directly to this repo, so the first step is to [create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo).
|
| 170 |
+
Once your fork is ready, you can clone a local copy with:
|
| 171 |
+
|
| 172 |
+
```console
|
| 173 |
+
$ git clone https://github.com/YOUR-USERNAME/haystack
|
| 174 |
+
```
|
| 175 |
+
|
| 176 |
+
If everything worked, you should be able to do something like this (the output might be different):
|
| 177 |
+
|
| 178 |
+
```console
|
| 179 |
+
$ cd haystack
|
| 180 |
+
$ hatch version
|
| 181 |
+
2.3.0-rc0
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
Last, install the pre-commit hooks with:
|
| 185 |
+
|
| 186 |
+
```bash
|
| 187 |
+
pre-commit install
|
| 188 |
+
```
|
| 189 |
+
|
| 190 |
+
This utility will run some tasks right before all `git commit` operations. From now on, your `git commit` output for
|
| 191 |
+
Haystack should look something like this:
|
| 192 |
+
|
| 193 |
+
```
|
| 194 |
+
> git commit -m "test"
|
| 195 |
+
check python ast.........................................................Passed
|
| 196 |
+
check json...........................................(no files to check)Skipped
|
| 197 |
+
check for merge conflicts................................................Passed
|
| 198 |
+
check that scripts with shebangs are executable..........................Passed
|
| 199 |
+
check toml...........................................(no files to check)Skipped
|
| 200 |
+
check yaml...........................................(no files to check)Skipped
|
| 201 |
+
fix end of files.........................................................Passed
|
| 202 |
+
mixed line ending........................................................Passed
|
| 203 |
+
don't commit to branch...................................................Passed
|
| 204 |
+
trim trailing whitespace.................................................Passed
|
| 205 |
+
ruff.....................................................................Passed
|
| 206 |
+
codespell................................................................Passed
|
| 207 |
+
Lint GitHub Actions workflow files...................(no files to check)Skipped
|
| 208 |
+
[massi/contrib d18a2577] test
|
| 209 |
+
2 files changed, 178 insertions(+), 45 deletions(-)
|
| 210 |
+
```
|
| 211 |
+
|
| 212 |
+
### Run the tests locally
|
| 213 |
+
|
| 214 |
+
Tests will automatically run in our CI for every commit you push to your PR on Github. In order to save precious CI time
|
| 215 |
+
we encourage you to run the tests locally before pushing new commits to Github. From the root of the git repository,
|
| 216 |
+
you can run all the unit tests like this:
|
| 217 |
+
|
| 218 |
+
```sh
|
| 219 |
+
hatch run test:unit
|
| 220 |
+
```
|
| 221 |
+
|
| 222 |
+
Hatch will create a dedicated virtual environment, sync the required dependencies and run all the unit tests from the
|
| 223 |
+
project. If you want to run a subset of the tests or even one test in particular, `hatch` will accept all the
|
| 224 |
+
options you would normally pass to `pytest`, for example:
|
| 225 |
+
|
| 226 |
+
```sh
|
| 227 |
+
# run one test method from a specific test class in a test file
|
| 228 |
+
hatch run test:unit test/test_logging.py::TestSkipLoggingConfiguration::test_skip_logging_configuration
|
| 229 |
+
```
|
| 230 |
+
|
| 231 |
+
### Run code quality checks locally
|
| 232 |
+
|
| 233 |
+
We also use tools to ensure consistent code style, quality, and static type checking. The quality of your code will be
|
| 234 |
+
tested by the CI, but once again, running the checks locally will speed up the review cycle.
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
To check your code type checking, run:
|
| 238 |
+
```sh
|
| 239 |
+
hatch run test:types
|
| 240 |
+
```
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
To check your code format run:
|
| 244 |
+
```sh
|
| 245 |
+
hatch run format-check
|
| 246 |
+
```
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
To format your code, you can run:
|
| 250 |
+
```sh
|
| 251 |
+
hatch run format
|
| 252 |
+
```
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
To check your code style according to linting rules run:
|
| 256 |
+
```sh
|
| 257 |
+
hatch run check
|
| 258 |
+
hatch run test:lint
|
| 259 |
+
```
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
If the linters spot any error, you can fix it before checking in your code:
|
| 263 |
+
```sh
|
| 264 |
+
hatch run fix
|
| 265 |
+
```
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
## Requirements for Pull Requests
|
| 269 |
+
|
| 270 |
+
To ease the review process, please follow the instructions in this paragraph when creating a Pull Request:
|
| 271 |
+
|
| 272 |
+
- For the title, use the [conventional commit convention](https://www.conventionalcommits.org/en/v1.0.0/).
|
| 273 |
+
- For the body, follow the existing [pull request template](https://github.com/deepset-ai/haystack/blob/main/.github/pull_request_template.md) to describe and document your changes.
|
| 274 |
+
|
| 275 |
+
### Release notes
|
| 276 |
+
|
| 277 |
+
Each PR must include a release notes file under the `releasenotes/notes` path created with `reno`, and a CI check will
|
| 278 |
+
fail if that's not the case. Pull requests with changes limited to tests, code comments or docstrings, and changes to
|
| 279 |
+
the CI/CD systems can be labeled with `ignore-for-release-notes` by a maintainer in order to bypass the CI check.
|
| 280 |
+
|
| 281 |
+
For example, if your PR is bumping the `transformers` version in the `pyproject.toml` file, that's something that
|
| 282 |
+
requires release notes. To create the corresponding file, from the root of the repo run:
|
| 283 |
+
|
| 284 |
+
```
|
| 285 |
+
$ hatch run release-note bump-transformers-to-4-31
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
A release notes file in YAML format will be created in the appropriate folder, appending a unique id to the name of the
|
| 289 |
+
release note you provided (in this case, `bump-transformers-to-4-31`). To add the actual content of the release notes,
|
| 290 |
+
you must edit the file that's just been created. In the file, you will find multiple sections along with an explanation
|
| 291 |
+
of what they're for. You have to remove all the sections that don't fit your release notes, in this case for example
|
| 292 |
+
you would fill in the `enhancements` section to describe the change:
|
| 293 |
+
|
| 294 |
+
```yaml
|
| 295 |
+
enhancements:
|
| 296 |
+
- |
|
| 297 |
+
Upgrade transformers to the latest version 4.31.0 so that Haystack can support the new LLama2 models.
|
| 298 |
+
```
|
| 299 |
+
|
| 300 |
+
You can now add the file to the same branch containing the code changes. Your release note will be part of your pull
|
| 301 |
+
request and reviewed along with any code you changed.
|
| 302 |
+
|
| 303 |
+
## CI (Continuous Integration)
|
| 304 |
+
|
| 305 |
+
We use GitHub Action for our Continuous Integration tasks. This means that as soon as you open a PR, GitHub will start
|
| 306 |
+
executing some workflows on your changes, like automated tests, linting, formatting, api docs generation, etc.
|
| 307 |
+
|
| 308 |
+
If all goes well, at the bottom of your PR page you should see something like this, where all checks are green.
|
| 309 |
+
|
| 310 |
+

|
| 311 |
+
|
| 312 |
+
If you see some red checks (like the following), then something didn't work, and action is needed from your side.
|
| 313 |
+
|
| 314 |
+

|
| 315 |
+
|
| 316 |
+
Click on the failing test and see if there are instructions at the end of the logs of the failed test.
|
| 317 |
+
For example, in the case above, the CI will give you instructions on how to fix the issue.
|
| 318 |
+
|
| 319 |
+

|
| 320 |
+
|
| 321 |
+
## Working from GitHub forks
|
| 322 |
+
|
| 323 |
+
In order for maintainers to be able to help you, we usually ask contributors to give us push access to their fork.
|
| 324 |
+
|
| 325 |
+
To do so, please verify that "Allow edits and access to secrets by maintainers" on the PR preview page is checked
|
| 326 |
+
(you can check it later on the PR's sidebar once it's created).
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
|
| 330 |
+
## Writing tests
|
| 331 |
+
|
| 332 |
+
We formally define three scopes for tests in Haystack with different requirements and purposes:
|
| 333 |
+
|
| 334 |
+
### Unit test
|
| 335 |
+
- Tests a single logical concept
|
| 336 |
+
- Execution time is a few milliseconds
|
| 337 |
+
- Any external resource is mocked
|
| 338 |
+
- Always returns the same result
|
| 339 |
+
- Can run in any order
|
| 340 |
+
- Runs at every commit in PRs, automated through `hatch run test:unit`
|
| 341 |
+
- Can run locally with no additional setup
|
| 342 |
+
- **Goal: being confident in merging code**
|
| 343 |
+
|
| 344 |
+
### Integration test
|
| 345 |
+
- Tests a single logical concept
|
| 346 |
+
- Execution time is a few seconds
|
| 347 |
+
- It uses external resources that must be available before execution
|
| 348 |
+
- When using models, it cannot use inference
|
| 349 |
+
- Always returns the same result or an error
|
| 350 |
+
- Can run in any order
|
| 351 |
+
- Runs at every commit in PRs, automated through `hatch run test:integration`
|
| 352 |
+
- Can run locally with some additional setup (e.g. Docker)
|
| 353 |
+
- **Goal: being confident in merging code**
|
| 354 |
+
|
| 355 |
+
### End to End (e2e) test
|
| 356 |
+
- Tests a sequence of multiple logical concepts
|
| 357 |
+
- Execution time has no limits (can be always on)
|
| 358 |
+
- Can use inference
|
| 359 |
+
- Evaluates the results of the execution or the status of the system
|
| 360 |
+
- It uses external resources that must be available before execution
|
| 361 |
+
- Can return different results
|
| 362 |
+
- Can be dependent on the order
|
| 363 |
+
- Can be wrapped into any process execution
|
| 364 |
+
- Runs outside the development cycle (nightly or on demand)
|
| 365 |
+
- Might not be possible to run locally due to system and hardware requirements
|
| 366 |
+
- **Goal: being confident in releasing Haystack**
|
| 367 |
+
|
| 368 |
+
## Contributor License Agreement (CLA)
|
| 369 |
+
|
| 370 |
+
Significant contributions to Haystack require a Contributor License Agreement (CLA). If the contribution requires a CLA,
|
| 371 |
+
we will get in contact with you. CLAs are quite common among company-backed open-source frameworks, and our CLA’s wording
|
| 372 |
+
is similar to other popular projects, like [Rasa](https://cla-assistant.io/RasaHQ/rasa) or
|
| 373 |
+
[Google's Tensorflow](https://cla.developers.google.com/clas/new?domain=DOMAIN_GOOGLE&kind=KIND_INDIVIDUAL)
|
| 374 |
+
(retrieved 4th November 2021).
|
| 375 |
+
|
| 376 |
+
The agreement's main purpose is to protect the continued open use of Haystack. At the same time, it also helps in
|
| 377 |
+
protecting you as a contributor. Contributions under this agreement will ensure that your code will continue to be
|
| 378 |
+
open to everyone in the future (“You hereby grant to Deepset **and anyone** [...]”) as well as remove liabilities on
|
| 379 |
+
your end (“you provide your Contributions on an AS IS basis, without warranties or conditions of any kind [...]”). You
|
| 380 |
+
can find the Contributor License Agreement [here](https://cla-assistant.io/deepset-ai/haystack).
|
| 381 |
+
|
| 382 |
+
If you have further questions about the licensing, feel free to reach out to contributors@deepset.ai.
|
testbed/deepset-ai__haystack/LICENSE
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright 2021 deepset GmbH
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
testbed/deepset-ai__haystack/README.md
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<a href="https://haystack.deepset.ai/"><img src="https://raw.githubusercontent.com/deepset-ai/haystack/main/docs/img/banner_20.png" alt="Green logo of a stylized white 'H' with the text 'Haystack, by deepset. Haystack 2.0 is live 🎉' Abstract green and yellow diagrams in the background."></a>
|
| 3 |
+
|
| 4 |
+
| | |
|
| 5 |
+
| ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
| 6 |
+
| CI/CD | [](https://github.com/deepset-ai/haystack/actions/workflows/tests.yml) [](https://github.com/python/mypy) [](https://coveralls.io/github/deepset-ai/haystack?branch=main) [](https://github.com/astral-sh/ruff) |
|
| 7 |
+
| Docs | [](https://docs.haystack.deepset.ai) |
|
| 8 |
+
| Package | [](https://pypi.org/project/haystack-ai/)   [](https://anaconda.org/conda-forge/haystack-ai) [](LICENSE) [](https://github.com/deepset-ai/haystack/actions/workflows/license_compliance.yml) |
|
| 9 |
+
| Meta | [](https://discord.com/invite/VBpFzsgRVF) [](https://twitter.com/haystack_ai) |
|
| 10 |
+
</div>
|
| 11 |
+
|
| 12 |
+
[Haystack](https://haystack.deepset.ai/) is an end-to-end LLM framework that allows you to build applications powered by
|
| 13 |
+
LLMs, Transformer models, vector search and more. Whether you want to perform retrieval-augmented generation (RAG),
|
| 14 |
+
document search, question answering or answer generation, Haystack can orchestrate state-of-the-art embedding models
|
| 15 |
+
and LLMs into pipelines to build end-to-end NLP applications and solve your use case.
|
| 16 |
+
|
| 17 |
+
## Installation
|
| 18 |
+
|
| 19 |
+
The simplest way to get Haystack is via pip:
|
| 20 |
+
|
| 21 |
+
```sh
|
| 22 |
+
pip install haystack-ai
|
| 23 |
+
```
|
| 24 |
+
|
| 25 |
+
Install from the `main` branch to try the newest features:
|
| 26 |
+
```sh
|
| 27 |
+
pip install git+https://github.com/deepset-ai/haystack.git@main
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
Haystack supports multiple installation methods including Docker images. For a comprehensive guide please refer
|
| 31 |
+
to the [documentation](https://docs.haystack.deepset.ai/docs/installation).
|
| 32 |
+
|
| 33 |
+
## Documentation
|
| 34 |
+
|
| 35 |
+
If you're new to the project, check out ["What is Haystack?"](https://haystack.deepset.ai/overview/intro) then go
|
| 36 |
+
through the ["Get Started Guide"](https://haystack.deepset.ai/overview/quick-start) and build your first LLM application
|
| 37 |
+
in a matter of minutes. Keep learning with the [tutorials](https://haystack.deepset.ai/tutorials). For more advanced
|
| 38 |
+
use cases, or just to get some inspiration, you can browse our Haystack recipes in the
|
| 39 |
+
[Cookbook](https://haystack.deepset.ai/cookbook).
|
| 40 |
+
|
| 41 |
+
At any given point, hit the [documentation](https://docs.haystack.deepset.ai/docs/intro) to learn more about Haystack, what it can do for you, and the technology behind it.
|
| 42 |
+
|
| 43 |
+
## Features
|
| 44 |
+
|
| 45 |
+
> [!IMPORTANT]
|
| 46 |
+
> **You are currently looking at the readme of Haystack 2.0**. We are still maintaining Haystack 1.x to give everyone
|
| 47 |
+
> enough time to migrate to 2.0. [Switch to Haystack 1.x here](https://github.com/deepset-ai/haystack/tree/v1.x).
|
| 48 |
+
|
| 49 |
+
- **Technology agnostic:** Allow users the flexibility to decide what vendor or technology they want and make it easy to switch out any component for another. Haystack allows you to use and compare models available from OpenAI, Cohere and Hugging Face, as well as your own local models or models hosted on Azure, Bedrock and SageMaker.
|
| 50 |
+
- **Explicit:** Make it transparent how different moving parts can “talk” to each other so it's easier to fit your tech stack and use case.
|
| 51 |
+
- **Flexible:** Haystack provides all tooling in one place: database access, file conversion, cleaning, splitting, training, eval, inference, and more. And whenever custom behavior is desirable, it's easy to create custom components.
|
| 52 |
+
- **Extensible:** Provide a uniform and easy way for the community and third parties to build their own components and foster an open ecosystem around Haystack.
|
| 53 |
+
|
| 54 |
+
Some examples of what you can do with Haystack:
|
| 55 |
+
|
| 56 |
+
- Build **retrieval augmented generation (RAG)** by making use of one of the available vector databases and customizing your LLM interaction, the sky is the limit 🚀
|
| 57 |
+
- Perform Question Answering **in natural language** to find granular answers in your documents.
|
| 58 |
+
- Perform **semantic search** and retrieve documents according to meaning.
|
| 59 |
+
- Build applications that can make complex decisions to answer complex queries, such as systems that can resolve complex customer queries, do knowledge search on many disconnected resources and so on.
|
| 60 |
+
- Scale to millions of docs using retrievers and production-scale components.
|
| 61 |
+
- Use **off-the-shelf models** or **fine-tune** them to your data.
|
| 62 |
+
- Use **user feedback** to evaluate, benchmark, and continuously improve your models.
|
| 63 |
+
|
| 64 |
+
> [!TIP]
|
| 65 |
+
><img src="https://github.com/deepset-ai/haystack/raw/main/docs/img/deepset-cloud-logo-lightblue.png" width=30% height=30%>
|
| 66 |
+
>
|
| 67 |
+
> Are you looking for a managed solution that benefits from Haystack? [deepset Cloud](https://www.deepset.ai/deepset-cloud?utm_campaign=developer-relations&utm_source=haystack&utm_medium=readme) is our fully managed, end-to-end platform to integrate LLMs with your data, which uses Haystack for the LLM pipelines architecture.
|
| 68 |
+
|
| 69 |
+
## 🔜 Visual Pipeline Editor
|
| 70 |
+
|
| 71 |
+
Use **deepset Studio** to visually create and export your Haystack pipeline architecture as a YAML or as Python code. Learn more about it in [our announcement post](https://haystack.deepset.ai/blog/announcing-studio).
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
👉 [Join the waitlist](https://landing.deepset.ai/deepset-studio-waitlist?utm_campaign=2408%20-%20Campaign%20-%20Studio%20Launch&utm_source=github&utm_medium=referral)!
|
| 77 |
+
|
| 78 |
+
## Telemetry
|
| 79 |
+
|
| 80 |
+
Haystack collects **anonymous** usage statistics of pipeline components. We receive an event every time these components are initialized. This way, we know which components are most relevant to our community.
|
| 81 |
+
|
| 82 |
+
Read more about telemetry in Haystack or how you can opt out in [Haystack docs](https://docs.haystack.deepset.ai/docs/telemetry).
|
| 83 |
+
|
| 84 |
+
## 🖖 Community
|
| 85 |
+
|
| 86 |
+
If you have a feature request or a bug report, feel free to open an [issue in Github](https://github.com/deepset-ai/haystack/issues). We regularly check these and you can expect a quick response. If you'd like to discuss a topic, or get more general advice on how to make Haystack work for your project, you can start a thread in [Github Discussions](https://github.com/deepset-ai/haystack/discussions) or our [Discord channel](https://discord.com/invite/VBpFzsgRVF). We also check [𝕏 (Twitter)](https://twitter.com/haystack_ai) and [Stack Overflow](https://stackoverflow.com/questions/tagged/haystack).
|
| 87 |
+
|
| 88 |
+
## Contributing to Haystack
|
| 89 |
+
|
| 90 |
+
We are very open to the community's contributions - be it a quick fix of a typo, or a completely new feature! You don't need to be a Haystack expert to provide meaningful improvements. To learn how to get started, check out our [Contributor Guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) first.
|
| 91 |
+
|
| 92 |
+
There are several ways you can contribute to Haystack:
|
| 93 |
+
- Contribute to the main Haystack project
|
| 94 |
+
- Contribute an integration on [haystack-core-integrations](https://github.com/deepset-ai/haystack-core-integrations)
|
| 95 |
+
|
| 96 |
+
> [!TIP]
|
| 97 |
+
>👉 **[Check out the full list of issues that are open to contributions](https://github.com/orgs/deepset-ai/projects/14)**
|
| 98 |
+
|
| 99 |
+
## Who Uses Haystack
|
| 100 |
+
|
| 101 |
+
Here's a list of projects and companies using Haystack. Want to add yours? Open a PR, add it to the list and let the
|
| 102 |
+
world know that you use Haystack!
|
| 103 |
+
|
| 104 |
+
- [Airbus](https://www.airbus.com/en)
|
| 105 |
+
- [Alcatel-Lucent](https://www.al-enterprise.com/)
|
| 106 |
+
- [Apple](https://www.apple.com/)
|
| 107 |
+
- [BetterUp](https://www.betterup.com/)
|
| 108 |
+
- [Databricks](https://www.databricks.com/)
|
| 109 |
+
- [Deepset](https://deepset.ai/)
|
| 110 |
+
- [Etalab](https://www.deepset.ai/blog/improving-on-site-search-for-government-agencies-etalab)
|
| 111 |
+
- [Infineon](https://www.infineon.com/)
|
| 112 |
+
- [Intel](https://github.com/intel/open-domain-question-and-answer#readme)
|
| 113 |
+
- [Intelijus](https://www.intelijus.ai/)
|
| 114 |
+
- [Intel Labs](https://github.com/IntelLabs/fastRAG#readme)
|
| 115 |
+
- [LEGO](https://github.com/larsbaunwall/bricky#readme)
|
| 116 |
+
- [Netflix](https://netflix.com)
|
| 117 |
+
- [NOS Portugal](https://www.nos.pt/en/welcome)
|
| 118 |
+
- [Nvidia](https://developer.nvidia.com/blog/reducing-development-time-for-intelligent-virtual-assistants-in-contact-centers/)
|
| 119 |
+
- [PostHog](https://github.com/PostHog/max-ai#readme)
|
| 120 |
+
- [Rakuten](https://www.rakuten.com/)
|
| 121 |
+
- [Sooth.ai](https://www.deepset.ai/blog/advanced-neural-search-with-sooth-ai)
|
testbed/deepset-ai__haystack/SECURITY.md
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Security Policy
|
| 2 |
+
|
| 3 |
+
## Report a Vulnerability
|
| 4 |
+
|
| 5 |
+
If you found a security vulnerability in Haystack, send a message to
|
| 6 |
+
[security@deepset.ai](mailto:security@deepset.ai).
|
| 7 |
+
|
| 8 |
+
In your message, please include:
|
| 9 |
+
|
| 10 |
+
1. Reproducible steps to trigger the vulnerability.
|
| 11 |
+
2. An explanation of what makes you think there is a vulnerability.
|
| 12 |
+
3. Any information you may have on active exploitations of the vulnerability (zero-day).
|
| 13 |
+
|
| 14 |
+
## Vulnerability Response
|
| 15 |
+
|
| 16 |
+
We'll review your report within 5 business days and we will do a preliminary analysis
|
| 17 |
+
to confirm that the vulnerability is plausible. Otherwise, we'll decline the report.
|
| 18 |
+
|
| 19 |
+
We won't disclose any information you share with us but we'll use it to get the issue
|
| 20 |
+
fixed or to coordinate a vendor response, as needed.
|
| 21 |
+
|
| 22 |
+
We'll keep you updated on the status of the issue.
|
| 23 |
+
|
| 24 |
+
Our goal is to disclose bugs as soon as possible once a user mitigation is available.
|
| 25 |
+
Once we get a good understanding of the vulnerability, we'll set a disclosure date after
|
| 26 |
+
consulting the author of the report and Haystack maintainers.
|
testbed/deepset-ai__haystack/VERSION.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
2.8.0-rc0
|
testbed/deepset-ai__haystack/code_of_conduct.txt
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
CONTRIBUTOR COVENANT CODE OF CONDUCT
|
| 2 |
+
====================================
|
| 3 |
+
|
| 4 |
+
Our Pledge
|
| 5 |
+
----------
|
| 6 |
+
|
| 7 |
+
We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for
|
| 8 |
+
everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics,
|
| 9 |
+
gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance,
|
| 10 |
+
race, caste, color, religion, or sexual identity and orientation.
|
| 11 |
+
|
| 12 |
+
We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
|
| 13 |
+
|
| 14 |
+
Our Standards
|
| 15 |
+
-------------
|
| 16 |
+
|
| 17 |
+
Examples of behavior that contributes to a positive environment for our community include:
|
| 18 |
+
- Demonstrating empathy and kindness toward other people
|
| 19 |
+
- Being respectful of differing opinions, viewpoints, and experiences
|
| 20 |
+
- Giving and gracefully accepting constructive feedback
|
| 21 |
+
- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
|
| 22 |
+
- Focusing on what is best not just for us as individuals, but for the overall community
|
| 23 |
+
|
| 24 |
+
Examples of unacceptable behavior include:
|
| 25 |
+
- The use of sexualized language or imagery, and sexual attention or advances of any kind
|
| 26 |
+
- Trolling, insulting or derogatory comments, and personal or political attacks
|
| 27 |
+
- Public or private harassment
|
| 28 |
+
- Publishing others’ private information, such as a physical or email address, without their explicit permission
|
| 29 |
+
- Other conduct which could reasonably be considered inappropriate in a professional setting
|
| 30 |
+
|
| 31 |
+
Enforcement Responsibilities
|
| 32 |
+
----------------------------
|
| 33 |
+
|
| 34 |
+
Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take
|
| 35 |
+
appropriate and fair corrective action in response to any behavior that they deem inappropriate,
|
| 36 |
+
threatening, offensive, or harmful.
|
| 37 |
+
|
| 38 |
+
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits,
|
| 39 |
+
issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for
|
| 40 |
+
moderation decisions when appropriate.
|
| 41 |
+
|
| 42 |
+
Scope
|
| 43 |
+
-----
|
| 44 |
+
|
| 45 |
+
This Code of Conduct applies within all community spaces, and also applies when an individual is officially
|
| 46 |
+
representing the community in public spaces. Examples of representing our community include using an official
|
| 47 |
+
e-mail address, posting via an official social media account, or acting as an appointed representative
|
| 48 |
+
at an online or offline event.
|
| 49 |
+
|
| 50 |
+
Enforcement
|
| 51 |
+
-----------
|
| 52 |
+
|
| 53 |
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible
|
| 54 |
+
for enforcement at engage@deepset.ai. All complaints will be reviewed and investigated promptly and fairly.
|
| 55 |
+
|
| 56 |
+
All community leaders are obligated to respect the privacy and security of the reporter of any incident.
|
| 57 |
+
|
| 58 |
+
Enforcement Guidelines
|
| 59 |
+
----------------------
|
| 60 |
+
|
| 61 |
+
Community leaders will follow these Community Impact Guidelines in determining the consequences for any action
|
| 62 |
+
they deem in violation of this Code of Conduct:
|
| 63 |
+
|
| 64 |
+
1. Correction
|
| 65 |
+
Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
|
| 66 |
+
|
| 67 |
+
Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation
|
| 68 |
+
and an explanation of why the behavior was inappropriate. A public apology may be requested.
|
| 69 |
+
|
| 70 |
+
2. Warning
|
| 71 |
+
Community Impact: A violation through a single incident or series of actions.
|
| 72 |
+
|
| 73 |
+
Consequence: A warning with consequences for continued behavior. No interaction with the people involved,
|
| 74 |
+
including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time.
|
| 75 |
+
This includes avoiding interactions in community spaces as well as external channels like social media.
|
| 76 |
+
Violating these terms may lead to a temporary or permanent ban.
|
| 77 |
+
|
| 78 |
+
3. Temporary Ban
|
| 79 |
+
Community Impact: A serious violation of community standards, including sustained inappropriate behavior.
|
| 80 |
+
|
| 81 |
+
Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified
|
| 82 |
+
period of time. No public or private interaction with the people involved, including unsolicited interaction with
|
| 83 |
+
those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.
|
| 84 |
+
|
| 85 |
+
4. Permanent Ban
|
| 86 |
+
Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
|
| 87 |
+
|
| 88 |
+
Consequence: A permanent ban from any sort of public interaction within the community.
|
| 89 |
+
|
| 90 |
+
Attribution
|
| 91 |
+
-----------
|
| 92 |
+
|
| 93 |
+
This Code of Conduct is adapted from the Contributor Covenant, version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
| 94 |
+
|
| 95 |
+
Community Impact Guidelines were inspired by Mozilla’s code of conduct enforcement ladder.
|
| 96 |
+
|
| 97 |
+
For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq.
|
| 98 |
+
Translations are available at https://www.contributor-covenant.org/translations.
|
testbed/deepset-ai__haystack/docs/README.md
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# :ledger: Looking for the docs?
|
| 2 |
+
|
| 3 |
+
You can find Haystack's documentation at https://docs.haystack.deepset.ai/.
|
| 4 |
+
|
| 5 |
+
# :computer: How to update docs?
|
| 6 |
+
|
| 7 |
+
## Overview, Components, Pipeline Nodes, and Guides
|
| 8 |
+
|
| 9 |
+
You can find these docs on the Haystack Docs page: https://docs.haystack.deepset.ai/docs/get_started. If you want to contribute, and we welcome every contribution, do the following:
|
| 10 |
+
1. Make sure you're on the right version (check the version expanding list in the top left corner).
|
| 11 |
+
2. Use the "Suggest Edits" link you can find in the top right corner of every page.
|
| 12 |
+
3. Suggest a change right in the docs and click **Submit Suggested Edits**.
|
| 13 |
+
4. Optionally, leave us a comment and submit your change.
|
| 14 |
+
|
| 15 |
+
Once we take care of it, you'll get an email telling you the change's been merged, or not. If not, we'll give you the reason why.
|
| 16 |
+
|
| 17 |
+
Make sure to check our [Contribution Guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md).
|
| 18 |
+
|
| 19 |
+
## Tutorials
|
| 20 |
+
|
| 21 |
+
The Tutorials live in a separate repo: https://github.com/deepset-ai/haystack-tutorials. For instructions on how to contribute to tutorials, see [Contributing to Tutorials](https://github.com/deepset-ai/haystack-tutorials/blob/main/Contributing.md#contributing-to-haystack-tutorials).
|
| 22 |
+
|
| 23 |
+
## API Reference
|
| 24 |
+
|
| 25 |
+
We use Pydoc-Markdown to create Markdown files from the docstrings in our code. There is a Github Action that regenerates the API pages with each commit.
|
| 26 |
+
|
| 27 |
+
If you want to generate a new Markdown file for a new Haystack module, create a `.yml` file in `docs/src/api/api` which configures how Pydoc-Markdown will generate the page and commit it to main.
|
| 28 |
+
|
| 29 |
+
All the updates to docstrings get pushed to documentation when you commit to the main branch.
|
| 30 |
+
|
| 31 |
+
### Configuration
|
| 32 |
+
|
| 33 |
+
Pydoc will read the configuration from a `.yml` file which is located under `/haystack/docs/_src/api/pydoc`. Our files contain three main sections:
|
| 34 |
+
|
| 35 |
+
- **loader**: A list of plugins that load API objects from python source files.
|
| 36 |
+
- **type**: Loader for python source files
|
| 37 |
+
- **search_path**: Location of source files
|
| 38 |
+
- **modules**: Modules that are used for generating the markdown file
|
| 39 |
+
- **ignore_when_discovered**: Define which files should be ignored
|
| 40 |
+
- **processor**: A list of plugins that process API objects to modify their docstrings (e.g. to adapt them from a documentation format to Markdown or to remove items that should not be rendered into the documentation).
|
| 41 |
+
- **type: filter**: Filter for specific modules
|
| 42 |
+
- **documented_only**: Only documented API objects
|
| 43 |
+
- **do_not_filter_modules**: Do not filter module objects
|
| 44 |
+
- **skip_empty_modules**: Skip modules without content
|
| 45 |
+
- **renderer**: A plugin that produces the output files. We use a custom ReadmeRenderer based on the Markdown renderer. It makes sure the Markdown files comply with ReadMe requirements.
|
| 46 |
+
- **type**: Define the renderer which you want to use. We are using the ReadmeRenderer to make sure the files display properly in ReadMe.
|
| 47 |
+
- **excerpt**: Add a short description of the page. It shows up right below the page title.
|
| 48 |
+
- **category**: This is the ReadMe category ID to make sure the doc lands in the right section of Haystack docs.
|
| 49 |
+
- **title**: The title of the doc as it will appear on the website. Make sure you always add "API" at the end.
|
| 50 |
+
- **slug**: The page slug, each word should be separated with a dash.
|
| 51 |
+
- **order**: Pages are ordered alphabetically. This defines where in the TOC the page lands.
|
| 52 |
+
- markdown:
|
| 53 |
+
- **descriptive_class_title**: Remove the word "Object" from class titles.
|
| 54 |
+
- **descriptive_module_title**: Add the word “Module” before the module name.
|
| 55 |
+
- **add_method_class_prefix**: Add the class name as a prefix to method names.
|
| 56 |
+
- **add_member_class_prefix**: Add the class name as a prefix to member names.
|
| 57 |
+
- **filename**: File name of the generated file, use underscores to separate each word.
|
testbed/deepset-ai__haystack/docs/pydoc/config/audio_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/audio]
|
| 4 |
+
modules: ["whisper_local", "whisper_remote"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Transcribes audio files.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Audio
|
| 19 |
+
slug: audio-api
|
| 20 |
+
order: 3
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: audio_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/builders_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/builders]
|
| 4 |
+
modules: ["answer_builder", "prompt_builder", "chat_prompt_builder"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Extract the output of a Generator to an Answer format, and build prompts.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Builders
|
| 19 |
+
slug: builders-api
|
| 20 |
+
order: 5
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: builders_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/caching_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/caching]
|
| 4 |
+
modules: ["cache_checker"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Checks if any document coming from the given URL is already present in the store.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Caching
|
| 19 |
+
slug: caching-api
|
| 20 |
+
order: 7
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: cachings_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/classifiers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/classifiers]
|
| 4 |
+
modules: ["document_language_classifier", "zero_shot_document_classifier"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Classify documents based on the provided labels.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Classifiers
|
| 19 |
+
slug: classifiers-api
|
| 20 |
+
order: 10
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: classifiers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/connectors.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/connectors]
|
| 4 |
+
modules: ["openapi_service"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Various connectors to integrate with external services.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Connectors
|
| 19 |
+
slug: connectors-api
|
| 20 |
+
order: 15
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: connectors_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/converters_api.yml
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/converters]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"azure",
|
| 7 |
+
"csv",
|
| 8 |
+
"docx",
|
| 9 |
+
"html",
|
| 10 |
+
"json",
|
| 11 |
+
"markdown",
|
| 12 |
+
"openapi_functions",
|
| 13 |
+
"output_adapter",
|
| 14 |
+
"pdfminer",
|
| 15 |
+
"pptx",
|
| 16 |
+
"pypdf",
|
| 17 |
+
"tika",
|
| 18 |
+
"txt",
|
| 19 |
+
]
|
| 20 |
+
ignore_when_discovered: ["__init__"]
|
| 21 |
+
processors:
|
| 22 |
+
- type: filter
|
| 23 |
+
expression:
|
| 24 |
+
documented_only: true
|
| 25 |
+
do_not_filter_modules: false
|
| 26 |
+
skip_empty_modules: true
|
| 27 |
+
- type: smart
|
| 28 |
+
- type: crossref
|
| 29 |
+
renderer:
|
| 30 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 31 |
+
excerpt: Various converters to transform data from one format to another.
|
| 32 |
+
category_slug: haystack-api
|
| 33 |
+
title: Converters
|
| 34 |
+
slug: converters-api
|
| 35 |
+
order: 20
|
| 36 |
+
markdown:
|
| 37 |
+
descriptive_class_title: false
|
| 38 |
+
classdef_code_block: false
|
| 39 |
+
descriptive_module_title: true
|
| 40 |
+
add_method_class_prefix: true
|
| 41 |
+
add_member_class_prefix: false
|
| 42 |
+
filename: converters_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/data_classess_api.yml
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/dataclasses]
|
| 4 |
+
modules:
|
| 5 |
+
["answer", "byte_stream", "chat_message", "document", "streaming_chunk", "sparse_embedding"]
|
| 6 |
+
ignore_when_discovered: ["__init__"]
|
| 7 |
+
processors:
|
| 8 |
+
- type: filter
|
| 9 |
+
expression:
|
| 10 |
+
documented_only: true
|
| 11 |
+
do_not_filter_modules: false
|
| 12 |
+
skip_empty_modules: true
|
| 13 |
+
- type: smart
|
| 14 |
+
- type: crossref
|
| 15 |
+
renderer:
|
| 16 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 17 |
+
excerpt: Core classes that carry data through the system.
|
| 18 |
+
category_slug: haystack-api
|
| 19 |
+
title: Data Classes
|
| 20 |
+
slug: data-classes-api
|
| 21 |
+
order: 30
|
| 22 |
+
markdown:
|
| 23 |
+
descriptive_class_title: false
|
| 24 |
+
classdef_code_block: false
|
| 25 |
+
descriptive_module_title: true
|
| 26 |
+
add_method_class_prefix: true
|
| 27 |
+
add_member_class_prefix: false
|
| 28 |
+
filename: data_classess_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/document_stores_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/document_stores/in_memory]
|
| 4 |
+
modules: ["document_store"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Stores your texts and meta data and provides them to the Retriever at query time.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Document Stores
|
| 19 |
+
slug: document-stores-api
|
| 20 |
+
order: 40
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: document_stores_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/document_writers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/writers]
|
| 4 |
+
modules: ["document_writer"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Writes Documents to a DocumentStore.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Document Writers
|
| 19 |
+
slug: document-writers-api
|
| 20 |
+
order: 50
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: document_writers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/embedders_api.yml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/embedders]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"azure_document_embedder",
|
| 7 |
+
"azure_text_embedder",
|
| 8 |
+
"hugging_face_api_document_embedder",
|
| 9 |
+
"hugging_face_api_text_embedder",
|
| 10 |
+
"openai_document_embedder",
|
| 11 |
+
"openai_text_embedder",
|
| 12 |
+
"sentence_transformers_document_embedder",
|
| 13 |
+
"sentence_transformers_text_embedder",
|
| 14 |
+
]
|
| 15 |
+
ignore_when_discovered: ["__init__"]
|
| 16 |
+
processors:
|
| 17 |
+
- type: filter
|
| 18 |
+
expression:
|
| 19 |
+
documented_only: true
|
| 20 |
+
do_not_filter_modules: false
|
| 21 |
+
skip_empty_modules: true
|
| 22 |
+
- type: smart
|
| 23 |
+
- type: crossref
|
| 24 |
+
renderer:
|
| 25 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 26 |
+
excerpt: Transforms queries into vectors to look for similar or relevant Documents.
|
| 27 |
+
category_slug: haystack-api
|
| 28 |
+
title: Embedders
|
| 29 |
+
slug: embedders-api
|
| 30 |
+
order: 60
|
| 31 |
+
markdown:
|
| 32 |
+
descriptive_class_title: false
|
| 33 |
+
classdef_code_block: false
|
| 34 |
+
descriptive_module_title: true
|
| 35 |
+
add_method_class_prefix: true
|
| 36 |
+
add_member_class_prefix: false
|
| 37 |
+
filename: embedders_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/evaluation_api.yml
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/evaluation]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"base",
|
| 7 |
+
"eval_run_result",
|
| 8 |
+
]
|
| 9 |
+
ignore_when_discovered: ["__init__"]
|
| 10 |
+
processors:
|
| 11 |
+
- type: filter
|
| 12 |
+
expression:
|
| 13 |
+
documented_only: true
|
| 14 |
+
do_not_filter_modules: false
|
| 15 |
+
skip_empty_modules: true
|
| 16 |
+
- type: smart
|
| 17 |
+
- type: crossref
|
| 18 |
+
renderer:
|
| 19 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 20 |
+
excerpt: Represents the results of evaluation.
|
| 21 |
+
category_slug: haystack-api
|
| 22 |
+
title: Evaluation
|
| 23 |
+
slug: evaluation-api
|
| 24 |
+
order: 61
|
| 25 |
+
markdown:
|
| 26 |
+
descriptive_class_title: false
|
| 27 |
+
classdef_code_block: false
|
| 28 |
+
descriptive_module_title: true
|
| 29 |
+
add_method_class_prefix: true
|
| 30 |
+
add_member_class_prefix: false
|
| 31 |
+
filename: evaluation_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/evaluators_api.yml
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/evaluators]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"answer_exact_match",
|
| 7 |
+
"context_relevance",
|
| 8 |
+
"document_map",
|
| 9 |
+
"document_mrr",
|
| 10 |
+
"document_ndcg",
|
| 11 |
+
"document_recall",
|
| 12 |
+
"faithfulness",
|
| 13 |
+
"llm_evaluator",
|
| 14 |
+
"sas_evaluator",
|
| 15 |
+
]
|
| 16 |
+
ignore_when_discovered: ["__init__"]
|
| 17 |
+
processors:
|
| 18 |
+
- type: filter
|
| 19 |
+
expression:
|
| 20 |
+
documented_only: true
|
| 21 |
+
do_not_filter_modules: false
|
| 22 |
+
skip_empty_modules: true
|
| 23 |
+
- type: smart
|
| 24 |
+
- type: crossref
|
| 25 |
+
renderer:
|
| 26 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 27 |
+
excerpt: Evaluate your pipelines or individual components.
|
| 28 |
+
category_slug: haystack-api
|
| 29 |
+
title: Evaluators
|
| 30 |
+
slug: evaluators-api
|
| 31 |
+
order: 63
|
| 32 |
+
markdown:
|
| 33 |
+
descriptive_class_title: false
|
| 34 |
+
classdef_code_block: false
|
| 35 |
+
descriptive_module_title: true
|
| 36 |
+
add_method_class_prefix: true
|
| 37 |
+
add_member_class_prefix: false
|
| 38 |
+
filename: evaluators_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/extractors_api.yml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/extractors]
|
| 4 |
+
modules: ["named_entity_extractor"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: filter
|
| 13 |
+
expression: "name not in ['_BackendEnumMeta', '_NerBackend', '_HfBackend', '_SpacyBackend']"
|
| 14 |
+
- type: smart
|
| 15 |
+
- type: crossref
|
| 16 |
+
renderer:
|
| 17 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 18 |
+
excerpt: Extracts predefined entities out of a piece of text.
|
| 19 |
+
category_slug: haystack-api
|
| 20 |
+
title: Extractors
|
| 21 |
+
slug: extractors-api
|
| 22 |
+
order: 65
|
| 23 |
+
markdown:
|
| 24 |
+
descriptive_class_title: false
|
| 25 |
+
classdef_code_block: false
|
| 26 |
+
descriptive_module_title: true
|
| 27 |
+
add_method_class_prefix: true
|
| 28 |
+
add_member_class_prefix: false
|
| 29 |
+
filename: extractors_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/fetchers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/fetchers]
|
| 4 |
+
modules: ["link_content"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Fetches content from a list of URLs and returns a list of extracted content streams.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Fetchers
|
| 19 |
+
slug: fetchers-api
|
| 20 |
+
order: 80
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: fetchers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/generators_api.yml
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/generators]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"azure",
|
| 7 |
+
"hugging_face_local",
|
| 8 |
+
"hugging_face_api",
|
| 9 |
+
"openai",
|
| 10 |
+
"chat/azure",
|
| 11 |
+
"chat/hugging_face_local",
|
| 12 |
+
"chat/hugging_face_api",
|
| 13 |
+
"chat/openai",
|
| 14 |
+
]
|
| 15 |
+
ignore_when_discovered: ["__init__"]
|
| 16 |
+
processors:
|
| 17 |
+
- type: filter
|
| 18 |
+
expression:
|
| 19 |
+
documented_only: true
|
| 20 |
+
do_not_filter_modules: false
|
| 21 |
+
skip_empty_modules: true
|
| 22 |
+
- type: smart
|
| 23 |
+
- type: crossref
|
| 24 |
+
renderer:
|
| 25 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 26 |
+
excerpt: Enables text generation using LLMs.
|
| 27 |
+
category_slug: haystack-api
|
| 28 |
+
title: Generators
|
| 29 |
+
slug: generators-api
|
| 30 |
+
order: 70
|
| 31 |
+
markdown:
|
| 32 |
+
descriptive_class_title: false
|
| 33 |
+
classdef_code_block: false
|
| 34 |
+
descriptive_module_title: true
|
| 35 |
+
add_method_class_prefix: true
|
| 36 |
+
add_member_class_prefix: false
|
| 37 |
+
filename: generators_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/joiners_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/joiners]
|
| 4 |
+
modules: ["document_joiner", "branch", "answer_joiner", "string_joiner"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Components that join lists of different objects
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Joiners
|
| 19 |
+
slug: joiners-api
|
| 20 |
+
order: 75
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: joiners_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/pipeline_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/core/pipeline]
|
| 4 |
+
modules: ["pipeline"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Arranges components and integrations in flow.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Pipeline
|
| 19 |
+
slug: pipeline-api
|
| 20 |
+
order: 90
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: pipeline_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/preprocessors_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/preprocessors]
|
| 4 |
+
modules: ["document_cleaner", "document_splitter", "text_cleaner", "nltk_document_splitter"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Preprocess your Documents and texts. Clean, split, and more.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: PreProcessors
|
| 19 |
+
slug: preprocessors-api
|
| 20 |
+
order: 100
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: preprocessors_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/rankers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/rankers]
|
| 4 |
+
modules: ["lost_in_the_middle", "meta_field", "transformers_similarity", "sentence_transformers_diversity"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Reorders a set of Documents based on their relevance to the query.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Rankers
|
| 19 |
+
slug: rankers-api
|
| 20 |
+
order: 110
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: rankers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/readers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/readers]
|
| 4 |
+
modules: ["extractive"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Takes a query and a set of Documents as input and returns ExtractedAnswers by selecting a text span within the Documents.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Readers
|
| 19 |
+
slug: readers-api
|
| 20 |
+
order: 120
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: readers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/retrievers_api.yml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/retrievers]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"in_memory/bm25_retriever",
|
| 7 |
+
"in_memory/embedding_retriever",
|
| 8 |
+
"filter_retriever",
|
| 9 |
+
"sentence_window_retriever",
|
| 10 |
+
]
|
| 11 |
+
ignore_when_discovered: ["__init__"]
|
| 12 |
+
processors:
|
| 13 |
+
- type: filter
|
| 14 |
+
expression:
|
| 15 |
+
documented_only: true
|
| 16 |
+
do_not_filter_modules: false
|
| 17 |
+
skip_empty_modules: true
|
| 18 |
+
- type: smart
|
| 19 |
+
- type: crossref
|
| 20 |
+
renderer:
|
| 21 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 22 |
+
excerpt: Sweeps through a Document Store and returns a set of candidate Documents that are relevant to the query.
|
| 23 |
+
category_slug: haystack-api
|
| 24 |
+
title: Retrievers
|
| 25 |
+
slug: retrievers-api
|
| 26 |
+
order: 130
|
| 27 |
+
markdown:
|
| 28 |
+
descriptive_class_title: false
|
| 29 |
+
classdef_code_block: false
|
| 30 |
+
descriptive_module_title: true
|
| 31 |
+
add_method_class_prefix: true
|
| 32 |
+
add_member_class_prefix: false
|
| 33 |
+
filename: retrievers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/routers_api.yml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/routers]
|
| 4 |
+
modules:
|
| 5 |
+
[
|
| 6 |
+
"conditional_router",
|
| 7 |
+
"file_type_router",
|
| 8 |
+
"metadata_router",
|
| 9 |
+
"text_language_router",
|
| 10 |
+
"transformers_text_router",
|
| 11 |
+
"zero_shot_text_router",
|
| 12 |
+
]
|
| 13 |
+
ignore_when_discovered: ["__init__"]
|
| 14 |
+
processors:
|
| 15 |
+
- type: filter
|
| 16 |
+
expression:
|
| 17 |
+
documented_only: true
|
| 18 |
+
do_not_filter_modules: false
|
| 19 |
+
skip_empty_modules: true
|
| 20 |
+
- type: smart
|
| 21 |
+
- type: crossref
|
| 22 |
+
renderer:
|
| 23 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 24 |
+
excerpt: Routers is a group of components that route queries or Documents to other components that can handle them best.
|
| 25 |
+
category_slug: haystack-api
|
| 26 |
+
title: Routers
|
| 27 |
+
slug: routers-api
|
| 28 |
+
order: 140
|
| 29 |
+
markdown:
|
| 30 |
+
descriptive_class_title: false
|
| 31 |
+
classdef_code_block: false
|
| 32 |
+
descriptive_module_title: true
|
| 33 |
+
add_method_class_prefix: true
|
| 34 |
+
add_member_class_prefix: false
|
| 35 |
+
filename: routers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/samplers_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/samplers]
|
| 4 |
+
modules: ["top_p"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Filters documents based on their similarity scores using top-p sampling.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Samplers
|
| 19 |
+
slug: samplers-api
|
| 20 |
+
order: 150
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: samplers_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/utils_api.yml
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/utils]
|
| 4 |
+
ignore_when_discovered: ["__init__", "hf"]
|
| 5 |
+
processors:
|
| 6 |
+
- type: filter
|
| 7 |
+
expression:
|
| 8 |
+
documented_only: true
|
| 9 |
+
do_not_filter_modules: false
|
| 10 |
+
skip_empty_modules: true
|
| 11 |
+
- type: filter
|
| 12 |
+
expression: "name not in ['TokenSecret', 'EnvVarSecret','_get_default_device', '_split_device_string', 'convert']"
|
| 13 |
+
- type: smart
|
| 14 |
+
- type: crossref
|
| 15 |
+
renderer:
|
| 16 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 17 |
+
excerpt: Utility functions and classes used across the library.
|
| 18 |
+
category_slug: haystack-api
|
| 19 |
+
title: Utils
|
| 20 |
+
slug: utils-api
|
| 21 |
+
order: 153
|
| 22 |
+
markdown:
|
| 23 |
+
descriptive_class_title: false
|
| 24 |
+
classdef_code_block: false
|
| 25 |
+
descriptive_module_title: true
|
| 26 |
+
add_method_class_prefix: true
|
| 27 |
+
add_member_class_prefix: false
|
| 28 |
+
filename: utils_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/validators_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/validators]
|
| 4 |
+
modules: ["json_schema"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Validators validate LLM outputs
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Validators
|
| 19 |
+
slug: validators-api
|
| 20 |
+
order: 155
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: validators_api.md
|
testbed/deepset-ai__haystack/docs/pydoc/config/websearch_api.yml
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
loaders:
|
| 2 |
+
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
|
| 3 |
+
search_path: [../../../haystack/components/websearch]
|
| 4 |
+
modules: ["serper_dev", "searchapi"]
|
| 5 |
+
ignore_when_discovered: ["__init__"]
|
| 6 |
+
processors:
|
| 7 |
+
- type: filter
|
| 8 |
+
expression:
|
| 9 |
+
documented_only: true
|
| 10 |
+
do_not_filter_modules: false
|
| 11 |
+
skip_empty_modules: true
|
| 12 |
+
- type: smart
|
| 13 |
+
- type: crossref
|
| 14 |
+
renderer:
|
| 15 |
+
type: haystack_pydoc_tools.renderers.ReadmeCoreRenderer
|
| 16 |
+
excerpt: Web search engine for Haystack.
|
| 17 |
+
category_slug: haystack-api
|
| 18 |
+
title: Websearch
|
| 19 |
+
slug: websearch-api
|
| 20 |
+
order: 170
|
| 21 |
+
markdown:
|
| 22 |
+
descriptive_class_title: false
|
| 23 |
+
classdef_code_block: false
|
| 24 |
+
descriptive_module_title: true
|
| 25 |
+
add_method_class_prefix: true
|
| 26 |
+
add_member_class_prefix: false
|
| 27 |
+
filename: websearch_api.md
|
testbed/deepset-ai__haystack/examples/README.md
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Examples have been moved!
|
| 2 |
+
|
| 3 |
+
If you're searching for Haystack examples we moved them into a dedicated repository.
|
| 4 |
+
|
| 5 |
+
You can find all the example cookbooks [👉 here 👈](https://github.com/deepset-ai/haystack-cookbook/).
|
testbed/deepset-ai__haystack/license-header.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
SPDX-FileCopyrightText: ${inceptionYear}-present ${copyrightOwner}
|
| 2 |
+
|
| 3 |
+
SPDX-License-Identifier: Apache-2.0
|
testbed/deepset-ai__haystack/licenserc.toml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
headerPath = "license-header.txt"
|
| 2 |
+
|
| 3 |
+
excludes = [
|
| 4 |
+
".github",
|
| 5 |
+
"docker",
|
| 6 |
+
"docs",
|
| 7 |
+
"examples",
|
| 8 |
+
"proposals",
|
| 9 |
+
"releasenotes",
|
| 10 |
+
"test",
|
| 11 |
+
"CITATION.cff",
|
| 12 |
+
"*.ini",
|
| 13 |
+
"*.jinja2",
|
| 14 |
+
"*.md",
|
| 15 |
+
"*.toml",
|
| 16 |
+
"*.txt",
|
| 17 |
+
"*.yaml",
|
| 18 |
+
"*.wav",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
[properties]
|
| 22 |
+
inceptionYear = 2022
|
| 23 |
+
copyrightOwner = "deepset GmbH <info@deepset.ai>"
|
testbed/deepset-ai__haystack/proposals/0000-template.md
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Title: (the title in plain english)
|
| 2 |
+
- Decision driver: (your name here)
|
| 3 |
+
- Start Date: (today's date, in format YYYY-MM-DD)
|
| 4 |
+
- Proposal PR: (fill in after opening the PR)
|
| 5 |
+
- Github Issue or Discussion: (only if available, link the original request for this change)
|
| 6 |
+
|
| 7 |
+
# Summary
|
| 8 |
+
|
| 9 |
+
Explain the feature briefly.
|
| 10 |
+
|
| 11 |
+
# Basic example
|
| 12 |
+
|
| 13 |
+
When applicable, write a snippet of code showing how the new feature would
|
| 14 |
+
be used.
|
| 15 |
+
|
| 16 |
+
# Motivation
|
| 17 |
+
|
| 18 |
+
Give us more background and explanation: Why do we need this feature? What use cases does it support? What's the expected
|
| 19 |
+
outcome?
|
| 20 |
+
|
| 21 |
+
Focus on explaining the motivation for this feature. We'd like to understand it, so that even if we don't accept this
|
| 22 |
+
proposal, others can use the motivation to develop alternative solutions.
|
| 23 |
+
|
| 24 |
+
# Detailed design
|
| 25 |
+
|
| 26 |
+
This is the bulk of the proposal. Explain the design in enough detail for somebody
|
| 27 |
+
familiar with Haystack to understand, and for somebody familiar with the
|
| 28 |
+
implementation to implement. Get into specifics and corner-cases,
|
| 29 |
+
and include examples of how the feature is used. Also, if there's any new terminology involved,
|
| 30 |
+
define it here.
|
| 31 |
+
|
| 32 |
+
# Drawbacks
|
| 33 |
+
|
| 34 |
+
Look at the feature from the other side: what are the reasons why we should _not_ work on it? Consider the following:
|
| 35 |
+
|
| 36 |
+
- What's the implementation cost, both in terms of code size and complexity?
|
| 37 |
+
- Can the solution you're proposing be implemented as a separate package, outside of Haystack?
|
| 38 |
+
- Does it teach people more about Haystack?
|
| 39 |
+
- How does this feature integrate with other existing and planned features?
|
| 40 |
+
- What's the cost of migrating existing Haystack pipelines (is it a breaking change?)?
|
| 41 |
+
|
| 42 |
+
There are tradeoffs to choosing any path. Attempt to identify them here.
|
| 43 |
+
|
| 44 |
+
# Alternatives
|
| 45 |
+
|
| 46 |
+
What other designs have you considered? What's the impact of not adding this feature?
|
| 47 |
+
|
| 48 |
+
# Adoption strategy
|
| 49 |
+
|
| 50 |
+
If we implement this proposal, how will the existing Haystack users adopt it? Is
|
| 51 |
+
this a breaking change? Can we write a migration script?
|
| 52 |
+
|
| 53 |
+
# How we teach this
|
| 54 |
+
|
| 55 |
+
Would implementing this feature mean the documentation must be re-organized
|
| 56 |
+
or updated? Does it change how Haystack is taught to new developers at any level?
|
| 57 |
+
|
| 58 |
+
How should this feature be taught to the existing Haystack users (for example with a page in the docs,
|
| 59 |
+
a tutorial, ...).
|
| 60 |
+
|
| 61 |
+
# Unresolved questions
|
| 62 |
+
|
| 63 |
+
Optional, but suggested for first drafts. What parts of the design are still
|
| 64 |
+
TBD?
|
testbed/deepset-ai__haystack/proposals/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Haystack proposals design process
|
| 2 |
+
|
| 3 |
+
Most of the changes to Haystack, including bug fixes and small improvements,
|
| 4 |
+
are implemented through the normal Pull Request workflow, according to our
|
| 5 |
+
[contribution guidelines](../CONTRIBUTING.md).
|
| 6 |
+
|
| 7 |
+
Some changes, though, are "substantial", and these are the ones we want to put through a bit
|
| 8 |
+
of a design process to make sure we're all on the same page before we invest the time
|
| 9 |
+
into the actual implementation of a new feature or a deep refactoring.
|
| 10 |
+
|
| 11 |
+
We've introduced the "Proposals design process" to provide a
|
| 12 |
+
consistent and controlled path for such changes to Haystack.
|
| 13 |
+
|
| 14 |
+
We will apply the same level of rigor to both core developers' and
|
| 15 |
+
Community's proposals. The primary difference between them is in the design phase:
|
| 16 |
+
core developers proposals tend to be submitted at the end of the design process
|
| 17 |
+
whereas the Community ones tend to be submitted at the beginning, as a way
|
| 18 |
+
to kickstart it.
|
| 19 |
+
|
| 20 |
+
## When do I follow the process?
|
| 21 |
+
|
| 22 |
+
Follow the process if you intend to make "substantial" changes to Haystack, `rest_api` or the process itself. What is
|
| 23 |
+
defined as a "substantial" change is evolving based on community norms and on what part of the project you are proposing
|
| 24 |
+
to change, but it may include the following:
|
| 25 |
+
|
| 26 |
+
- A new feature that creates new API surface areas.
|
| 27 |
+
- A new component (Nodes, Pipelines, Document Stores).
|
| 28 |
+
- Removing features that already shipped in the current minor version.
|
| 29 |
+
- A deep refactoring that would require new tests or introduce new dependencies.
|
| 30 |
+
- A change that's complex enough to require multiple steps to be delivered.
|
| 31 |
+
|
| 32 |
+
Some changes don't require a proposal, for example:
|
| 33 |
+
|
| 34 |
+
- Minor bug fixes.
|
| 35 |
+
- Rephrasing, reorganizing, or otherwise "changing shape does not change meaning".
|
| 36 |
+
- Addition and removal of warnings or other error messages.
|
| 37 |
+
- Additions only likely to be noticed by other contributors, invisible to Haystack users.
|
| 38 |
+
|
| 39 |
+
In any case, the core developers might politely ask you to submit a proposal before merging
|
| 40 |
+
a new feature when they see fit.
|
| 41 |
+
|
| 42 |
+
## Before creating a proposal
|
| 43 |
+
|
| 44 |
+
Laying some groundwork ahead of the proposal can make the process smoother.
|
| 45 |
+
|
| 46 |
+
Although there is no single way to prepare for submitting a proposal, it is generally a good idea
|
| 47 |
+
to collect feedback from other project developers first, to make sure that the change
|
| 48 |
+
is actually needed. As we're an open source community where everyone can impact the project, we all
|
| 49 |
+
need to make an effort to build consensus.
|
| 50 |
+
|
| 51 |
+
When you're preparing for writing and submitting a proposal, talk the idea over on our official
|
| 52 |
+
[Discord server](https://haystack.deepset.ai/community/join) and in a Github
|
| 53 |
+
issue or discussion in the [Haystack repository](https://github.com/deepset-ai/haystack).
|
| 54 |
+
|
| 55 |
+
## The process
|
| 56 |
+
|
| 57 |
+
To get a major feature added to Haystack, you first merge the proposal into the Haystack repo as a Markdown file.
|
| 58 |
+
At that point, the proposal can be implemented and eventually included into the codebase.
|
| 59 |
+
|
| 60 |
+
There are several people involved in the process:
|
| 61 |
+
- **Decision Driver**: the person creating the proposal. If the Decision Driver is not a core contributor themselves,
|
| 62 |
+
one will be assigned to the PR and will take care of facilitating the process.
|
| 63 |
+
- **Input Givers**: anybody reviewing or commenting on the PR.
|
| 64 |
+
- **Approvers**: the core contributors approving the PR.
|
| 65 |
+
|
| 66 |
+
During its lifecycle, a proposal can transition between the following states:
|
| 67 |
+
- **Review**: proposal is getting feedback.
|
| 68 |
+
- **Final Comment**: proposal received approval from 3 core contributors; this state must be kept for a grace period of
|
| 69 |
+
3 calendar days.
|
| 70 |
+
- **Active**: proposal was approved and merged and can be implemented if not already.
|
| 71 |
+
- **Stale**: proposal didn't get any update in the last 30 days and will be closed after a grace period of 10 days.
|
| 72 |
+
- **Rejected**: proposal was actively rejected and the reasons explained.
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
To create a proposal:
|
| 76 |
+
|
| 77 |
+
1. Copy `0000-template.md` to `text/0000-my-feature.md`, where 'my-feature' is a descriptive name of the feature you're
|
| 78 |
+
proposing. Don't assign an identification number yet.
|
| 79 |
+
2. Fill in the proposal. Pay attention to details. Proposals that present convincing motivation,
|
| 80 |
+
demonstrate an understanding of the feature impact, and honestly present the drawbacks and
|
| 81 |
+
alternatives tend to be received well.
|
| 82 |
+
3. Submit a pull request. This ensures the document receives design feedback from a larger community,
|
| 83 |
+
and as the Decision Driver, you should be prepared to revise it in response.
|
| 84 |
+
4. Rename the file using the PR number, for example from `text/0000-my-feature.md` to `text/4242-my-feature.md`. The
|
| 85 |
+
proposal is now in **Review** state.
|
| 86 |
+
5. Reach an agreement with the Input Givers and integrate the feedback you got. Proposals that have broad support are
|
| 87 |
+
much more likely to make progress than those that don't receive any comments.
|
| 88 |
+
6. Now it's time for the Approvers to decide whether the proposal is a candidate for inclusion in Haystack. Note that a
|
| 89 |
+
review from the core contributors may take a long time, and getting early feedback from members of the Community can
|
| 90 |
+
ease the process.
|
| 91 |
+
7. When the proposal enters the **Final Comment** state (see above), the PR will be marked accordingly, entering a
|
| 92 |
+
grace period lasting 3 calendar days during which a proposal can be modified based on feedback from core developers
|
| 93 |
+
or the Community. Big changes may trigger a new final comment period.
|
| 94 |
+
8. Approvers may reject a proposal once the public discussion and adding comments are over, adding the reason for
|
| 95 |
+
rejection. A core developer then closes the related PR. The proposal gets the **Rejected** state.
|
| 96 |
+
9. When the final comment period ends, the PR is merged and the proposal becomes **Active**.
|
| 97 |
+
|
| 98 |
+
## What happens next
|
| 99 |
+
|
| 100 |
+
Once a proposal becomes active, the authors are free to implement it and submit the feature as one or more pull
|
| 101 |
+
requests to the Haystack repo. Becoming 'active' is not a rubber stamp, and in particular still doesn't
|
| 102 |
+
mean the feature will ultimately be merged; it does mean that the core team has agreed to it in
|
| 103 |
+
principle and is open to merging it if the implementation reflects the contents of the proposal.
|
| 104 |
+
|
| 105 |
+
The fact that a given proposal has been accepted and is 'active' doesn't imply it has a priority assigned or somebody's
|
| 106 |
+
currently working on it.
|
| 107 |
+
|
| 108 |
+
To change an active proposal, open follow-up PRs. Our goal is to write each proposal so that
|
| 109 |
+
it reflects the final design of the feature, but the nature of the process means that we cannot
|
| 110 |
+
expect every merged proposal to actually reflect what the end result will be at the time of the next release.
|
| 111 |
+
That's why we try to keep each proposal document somewhat in sync with the feature as planned, tracking such
|
| 112 |
+
changes through follow-up pull requests to the document.
|
| 113 |
+
|
| 114 |
+
As the author of a proposal, you're not obligated to implement it. Of course, the author (like any other developer)
|
| 115 |
+
is welcome to post an implementation for review after the proposal has been accepted.
|
| 116 |
+
|
| 117 |
+
## Inspiration
|
| 118 |
+
|
| 119 |
+
Haystack's proposals design process owes its inspiration to the [React](https://github.com/reactjs/rfcs) and
|
| 120 |
+
[Rust](https://github.com/rust-lang/rfcs) RFC processes. We're open to changing it if needed.
|
testbed/deepset-ai__haystack/proposals/text/2170-pydantic-dataclasses.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!--
|
| 2 |
+
NOTE: this document was imported from a different process and is not compliant with the proposal template. Do not
|
| 3 |
+
use it as a reference for new proposals.
|
| 4 |
+
-->
|
| 5 |
+
- Start Date: 2021-10-14
|
| 6 |
+
- Proposal PR: n/a
|
| 7 |
+
- Github Issue: https://github.com/deepset-ai/haystack/pull/1598
|
| 8 |
+
- Deciders: @tholor
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
## Context and Problem Statement
|
| 12 |
+
|
| 13 |
+
Originally we implemented Haystack's primitives based on Python's vanilla `dataclasses`. However, shortly after, we realized this causes issues with FastAPI, which uses Pydantic's implementation. We need to decide which version (vanilla Python's or Pydantic's) to use in our codebase.
|
| 14 |
+
|
| 15 |
+
## Decision Drivers
|
| 16 |
+
|
| 17 |
+
* The Swagger autogenerated documentation for REST API in FastAPI was broken where the dataclasses include non-standard fields (`pd.dataframe` + `np.ndarray`)
|
| 18 |
+
|
| 19 |
+
## Considered Options
|
| 20 |
+
|
| 21 |
+
* Switch to Pydantic `dataclasses` in our codebase as well.
|
| 22 |
+
* Staying with vanilla `dataclasses` and find a workaround for FastAPI to accept them in place of Pydantic's implementation.
|
| 23 |
+
|
| 24 |
+
## Decision Outcome
|
| 25 |
+
|
| 26 |
+
Chosen option: **1**, because our initial concerns about speed proved negligible and Pydantic's implementation provided some additional functionality for free (see below).
|
| 27 |
+
|
| 28 |
+
### Positive Consequences
|
| 29 |
+
|
| 30 |
+
* We can now inherit directly from the primitives in the REST API dataclasses, and overwrite the problematic fields with standard types.
|
| 31 |
+
* We now get runtime type checks "for free", as this is a core feature of Pydantic's implementation.
|
| 32 |
+
|
| 33 |
+
### Negative Consequences
|
| 34 |
+
|
| 35 |
+
* Pydantic dataclasses are slower. See https://github.com/deepset-ai/haystack/pull/1598 for a rough performance assessment.
|
| 36 |
+
* Pydantic dataclasses do not play nice with mypy and autocomplete tools unaided. In many cases a complex import statement, such as the following, is needed:
|
| 37 |
+
|
| 38 |
+
```python
|
| 39 |
+
if typing.TYPE_CHECKING:
|
| 40 |
+
from dataclasses import dataclass
|
| 41 |
+
else:
|
| 42 |
+
from pydantic.dataclasses import dataclass
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
## Pros and Cons of the Options
|
| 46 |
+
|
| 47 |
+
### Switch to Pydantic `dataclasses`
|
| 48 |
+
|
| 49 |
+
* Good, because it solves the issue without having to find workarounds for FastAPI.
|
| 50 |
+
* Good, because it adds type checks at runtime.
|
| 51 |
+
* Bad, because mypy and autocomplete tools need assistance to parse its dataclasses properly. Example:
|
| 52 |
+
|
| 53 |
+
```python
|
| 54 |
+
if typing.TYPE_CHECKING:
|
| 55 |
+
from dataclasses import dataclass
|
| 56 |
+
else:
|
| 57 |
+
from pydantic.dataclasses import dataclass
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
* Bad, because it introduces an additional dependency to Haystack (negligible)
|
| 61 |
+
* Bad, because it adds some overhead on the creation of primitives (negligible)
|
| 62 |
+
|
| 63 |
+
### Staying with vanilla `dataclasses`
|
| 64 |
+
|
| 65 |
+
* Good, because it's Python's standard way to generate data classes
|
| 66 |
+
* Good, because mypy can deal with them without plugins or other tricks.
|
| 67 |
+
* Good, because it's faster than Pydantic's implementation.
|
| 68 |
+
* Bad, because does not play well with FastAPI and Swagger (critical).
|
| 69 |
+
* Bad, because it has no validation at runtime (negligible)
|
| 70 |
+
|
| 71 |
+
## Links <!-- optional -->
|
| 72 |
+
|
| 73 |
+
* https://pydantic-docs.helpmanual.io/usage/dataclasses/
|
| 74 |
+
* https://github.com/deepset-ai/haystack/pull/1598
|
| 75 |
+
* https://github.com/deepset-ai/haystack/issues/1593
|
| 76 |
+
* https://github.com/deepset-ai/haystack/issues/1582
|
| 77 |
+
* https://github.com/deepset-ai/haystack/pull/1398
|
| 78 |
+
* https://github.com/deepset-ai/haystack/issues/1232
|
| 79 |
+
|
| 80 |
+
<!-- markdownlint-disable-file MD013 -->
|
testbed/deepset-ai__haystack/proposals/text/3550-csv-converter.md
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Start Date: 2022-11-28
|
| 2 |
+
- Proposal PR: [#3638](https://github.com/deepset-ai/haystack/issues/3638)
|
| 3 |
+
- Github Issue: [#3550](https://github.com/deepset-ai/haystack/issues/3550)
|
| 4 |
+
|
| 5 |
+
# Summary
|
| 6 |
+
|
| 7 |
+
Using Haystack for question answering pipelines is pretty easy, but most of the time users have CSV files containing
|
| 8 |
+
their knowledge base with questions and their associated answers.
|
| 9 |
+
Unfortunately there is no easy way to dynamically update the knowledge base or import new data from CSV through the REST API
|
| 10 |
+
using YAML, as there is no CSV parser.
|
| 11 |
+
|
| 12 |
+
Having a basic way to dynamically index a CSV file always requires development of a new node.
|
| 13 |
+
|
| 14 |
+
# Basic example
|
| 15 |
+
|
| 16 |
+
To define an FAQ query and **indexing** pipeline we would then simply do :
|
| 17 |
+
```yaml
|
| 18 |
+
# To allow your IDE to autocomplete and validate your YAML pipelines, name them as <name of your choice>.haystack-pipeline.yml
|
| 19 |
+
|
| 20 |
+
version: ignore
|
| 21 |
+
|
| 22 |
+
components: # define all the building-blocks for Pipeline
|
| 23 |
+
- name: DocumentStore
|
| 24 |
+
type: ElasticsearchDocumentStore
|
| 25 |
+
params:
|
| 26 |
+
host: localhost
|
| 27 |
+
embedding_field: question_emb
|
| 28 |
+
embedding_dim: 384
|
| 29 |
+
excluded_meta_data:
|
| 30 |
+
- question_emb
|
| 31 |
+
similarity: cosine
|
| 32 |
+
- name: Retriever
|
| 33 |
+
type: EmbeddingRetriever
|
| 34 |
+
params:
|
| 35 |
+
document_store: DocumentStore # params can reference other components defined in the YAML
|
| 36 |
+
embedding_model: sentence-transformers/all-MiniLM-L6-v2
|
| 37 |
+
scale_score: False
|
| 38 |
+
- name: CSVConverter
|
| 39 |
+
type: CsvTextConverter
|
| 40 |
+
|
| 41 |
+
pipelines:
|
| 42 |
+
- name: indexing
|
| 43 |
+
nodes:
|
| 44 |
+
- name: CSVConverter
|
| 45 |
+
inputs: [File]
|
| 46 |
+
- name: Retriever
|
| 47 |
+
inputs: [ CSVConverter ]
|
| 48 |
+
- name: DocumentStore
|
| 49 |
+
inputs: [ Retriever ]
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
# Motivation
|
| 53 |
+
|
| 54 |
+
Using YAML pipeline description it's not possible to describe a CSV FAQ indexing pipeline that takes CSV files as input
|
| 55 |
+
containing questions and answers and index them. It's a basic usage that still requires coding.
|
| 56 |
+
|
| 57 |
+
As we are presenting a tutorial `Utilizing Existing FAQs for Question Answering` it would be great to have this basic
|
| 58 |
+
node so that anyone can quickly run an FAQ Question Answering pipeline using only a YAML description and import their
|
| 59 |
+
CSV through the REST API.
|
| 60 |
+
|
| 61 |
+
# Detailed design
|
| 62 |
+
|
| 63 |
+
I've added a new node: **`CsvTextConverter`**. It takes a file input, parses it as an FAQ CSV file having `question` and `answer` columns,
|
| 64 |
+
and outputs `Document`s.
|
| 65 |
+
|
| 66 |
+
For now the node is very simple: can only handle a fixed CSV format and no other tabular data. It also
|
| 67 |
+
can't produce documents that are not of type `text`. These shortcomings can be addressed in later enhancements.
|
| 68 |
+
|
| 69 |
+
# Drawbacks
|
| 70 |
+
|
| 71 |
+
We could consider that developing this custom node is easy and a good way to learn Haystack,
|
| 72 |
+
but casual users shouldn't need to know this much before being able to index CSV files.
|
| 73 |
+
|
| 74 |
+
# Alternatives
|
| 75 |
+
|
| 76 |
+
Didn't consider any other design.
|
| 77 |
+
|
| 78 |
+
# Adoption strategy
|
| 79 |
+
|
| 80 |
+
It doesn't introduce any breaking change, any users having FAQs pipeline would be able to use the official nodes instead
|
| 81 |
+
of their existing ones.
|
| 82 |
+
|
| 83 |
+
# How we teach this
|
| 84 |
+
|
| 85 |
+
This may require updating this tutorial [Utilizing Existing FAQs for Question Answering](https://haystack.deepset.ai/tutorials/04_faq_style_qa)
|
| 86 |
+
and to document those 2 nodes.
|
testbed/deepset-ai__haystack/proposals/text/3558-embedding_retriever.md
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Start Date: 2022-11-11
|
| 2 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3558
|
| 3 |
+
- Github Issue:
|
| 4 |
+
|
| 5 |
+
> ⚠️ Superseded by https://github.com/deepset-ai/haystack/blob/main/proposals/text/5390-embedders.md
|
| 6 |
+
|
| 7 |
+
# Summary
|
| 8 |
+
|
| 9 |
+
- Current EmbeddingRetriever doesn't allow Haystack users to provide new embedding methods and is
|
| 10 |
+
currently constricted to farm, transformers, sentence transformers, OpenAI and Cohere based
|
| 11 |
+
embedding approaches. Any new encoding methods need to be explicitly added to Haystack
|
| 12 |
+
and registered with the EmbeddingRetriever.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
- We should allow users to easily plug-in new embedding methods to EmbeddingRetriever. For example, a Haystack user should be able to
|
| 16 |
+
add custom embeddings without having to commit additional code to Haystack repository.
|
| 17 |
+
|
| 18 |
+
# Basic example
|
| 19 |
+
EmbeddingRetriever is instantiated with:
|
| 20 |
+
|
| 21 |
+
``` python
|
| 22 |
+
retriever = EmbeddingRetriever(
|
| 23 |
+
document_store=document_store,
|
| 24 |
+
embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
|
| 25 |
+
model_format="sentence_transformers",
|
| 26 |
+
)
|
| 27 |
+
```
|
| 28 |
+
- The current approach doesn't provide a pluggable abstraction point of composition but
|
| 29 |
+
rather attempts to satisfy various embedding methodologies by having a lot of
|
| 30 |
+
parameters which keep ever expanding.
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
- The new approach allows creation of the underlying embedding mechanism (EmbeddingEncoder)
|
| 34 |
+
which is then in turn plugged into EmbeddingRetriever. For example:
|
| 35 |
+
|
| 36 |
+
``` python
|
| 37 |
+
encoder = SomeNewFancyEmbeddingEncoder(api_key="asdfklklja",
|
| 38 |
+
query_model="text-search-query",
|
| 39 |
+
doc_model="text-search-doc")
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
- EmbeddingEncoder is then used for the creation of EmbeddingRetriever. EmbeddingRetriever
|
| 43 |
+
init method doesn't get polluted with additional parameters as all of the peculiarities
|
| 44 |
+
of a particular encoder methodology are contained in its abstraction layer.
|
| 45 |
+
|
| 46 |
+
``` python
|
| 47 |
+
retriever = EmbeddingRetriever(
|
| 48 |
+
document_store=document_store,
|
| 49 |
+
encoder=encoder
|
| 50 |
+
)
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
# Motivation
|
| 54 |
+
|
| 55 |
+
- Why are we doing this? What use cases does it support? What is the expected outcome?
|
| 56 |
+
|
| 57 |
+
We could certainly keep the current solution as is; it does implement a decent level
|
| 58 |
+
of composition/decoration to lower coupling between EmbeddingRetriever and the underlying
|
| 59 |
+
mechanism of embedding (sentence transformers, OpenAI, etc). However, the current mechanism
|
| 60 |
+
in place basically hard-codes available embedding implementations and prevents our users from
|
| 61 |
+
adding new embedding mechanism by themselves outside of Haystack repository. We also might
|
| 62 |
+
want to have a non-public dC embedding mechanism in the future. In the current design a non-public
|
| 63 |
+
dC embedding mechanism would be impractical. In addition, the more underlying implementations we
|
| 64 |
+
add we'll continue to "pollute" EmbeddingRetriever init method with more and more parameters.
|
| 65 |
+
This is certainly less than ideal long term.
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
- EmbeddingEncoder classes should be subclasses of BaseComponent! As subclasses of BaseComponent,
|
| 69 |
+
we can use them outside the EmbeddingRetriever context in indexing pipelines, generating the
|
| 70 |
+
embeddings. We are currently employing a kludge of using Retrievers which is quite counter-intuitive
|
| 71 |
+
and confusing for our users.
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
- EmbeddingEncoder classes might sound overly complicated, especially with a distinguishing mechanism
|
| 75 |
+
name pre-appended (i.e CohereEmbeddingEncoder). Therefore, we'll adopt <specific>Embedder
|
| 76 |
+
naming scheme, i.e. CohereEmbedder, SentenceTransformerEmbedder and so on.
|
| 77 |
+
|
| 78 |
+
# Detailed design
|
| 79 |
+
|
| 80 |
+
- Our new EmbeddingRetriever would still wrap the underlying encoding mechanism in the form of
|
| 81 |
+
_BaseEmbedder. _BaseEmbedder still needs to implement methods:
|
| 82 |
+
- embed_queries
|
| 83 |
+
- embed_documents
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
- The new design approach differs in the creation of EmbeddingRetriever - rather than hiding the underlying encoding
|
| 87 |
+
mechanism one could simply create the EmbeddingRetriever with a specific encoder directly. For example:
|
| 88 |
+
|
| 89 |
+
```
|
| 90 |
+
retriever = EmbeddingRetriever(
|
| 91 |
+
document_store=document_store,
|
| 92 |
+
encoder=OpenAIEmbedder(api_key="asdfklklja", model="ada"),
|
| 93 |
+
#additional EmbeddingRetriever-abstraction-level parameters
|
| 94 |
+
)
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
- If the "two-step approach" of EmbeddingRetriever initialization is no longer the ideal solution (issues with current
|
| 98 |
+
schema generation and loading/saving via YAML pipelines) we might simply add the EmbeddingRetriever
|
| 99 |
+
class for every supported encoding approach. For example, we could have OpenAIEmbeddingRetriever, CohereEmbeddingRetriever,
|
| 100 |
+
SentenceTransformerEmbeddingRetriever and so on. Each of these retrievers will delegate the bulk of the work to an
|
| 101 |
+
existing EmbeddingRetriever with a per-class-specific Embedder set in the class constructor (for that custom
|
| 102 |
+
encoding part). We'll get the best of both worlds. Each <Specific>EmbeddingRetriever will have only the relevant primitives
|
| 103 |
+
parameters for the **init()** constructor; the underlying EmbeddingRetriever attribute in <Specific>EmbeddingRetriever
|
| 104 |
+
will handle most of the business logic of retrieving, yet each retriever will use an appropriate per-class-specific
|
| 105 |
+
Embedder for the custom encoding part.
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# Drawbacks
|
| 110 |
+
- The main shortcomings are:
|
| 111 |
+
- The "two-step approach" in EmbeddingRetriever initialization
|
| 112 |
+
- Likely be an issue for the current schema generation and loading/saving via YAML pipelines (see solution above)
|
| 113 |
+
- It is an API breaking change, so it'll require a code update for all EmbeddingRetriever usage both in our codebase and for Haystack users
|
| 114 |
+
- Can only be done in major release along with other breaking changes
|
| 115 |
+
|
| 116 |
+
# Alternatives
|
| 117 |
+
|
| 118 |
+
We could certainly keep everything as is :-)
|
| 119 |
+
|
| 120 |
+
# Adoption strategy
|
| 121 |
+
- As it is a breaking change, we should implement it for the next major release.
|
| 122 |
+
|
| 123 |
+
# How do we teach this?
|
| 124 |
+
- This change would require only a minor change in documentation.
|
| 125 |
+
- The concept of embedding retriever remains, just the mechanics are slightly changed
|
| 126 |
+
- All docs and tutorials need to be updated
|
| 127 |
+
- Haystack users are informed about a possibility to create and use their own embedders for embedding retriever.
|
| 128 |
+
# Unresolved questions
|
| 129 |
+
|
| 130 |
+
Optional, but suggested for first drafts. What parts of the design are still
|
| 131 |
+
TBD?
|
testbed/deepset-ai__haystack/proposals/text/3665-prompt-node.md
ADDED
|
@@ -0,0 +1,445 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Start Date: 2022-12-04
|
| 2 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3665
|
| 3 |
+
- Github Issue: https://github.com/deepset-ai/haystack/issues/3306
|
| 4 |
+
- Example Colab notebook: https://colab.research.google.com/drive/1bOIxb8cnpTrpMtTSBArY9FJlL59Ar4K_?usp=sharing
|
| 5 |
+
|
| 6 |
+
# Summary
|
| 7 |
+
|
| 8 |
+
- The PromptNode class is the central abstraction in Haystack's large language model (LLM) support.
|
| 9 |
+
Such a component supports multiple NLP tasks out-of-the-box. PromptNode allows users to
|
| 10 |
+
perform multiple tasks, such as summarization, question answering, question generation etc. using a single,
|
| 11 |
+
unified model within the Haystack framework.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
- One of the benefits of PromptNode is that it allows users to define and add additional tasks
|
| 15 |
+
that are supported by the model. This can enable users to extend the capabilities of the model and
|
| 16 |
+
to use it for a wider range of NLP tasks within the Haystack ecosystem.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Basic example
|
| 20 |
+
|
| 21 |
+
PromptNode is instantiated with the underlying LLM model and prompted by using natural language:
|
| 22 |
+
|
| 23 |
+
``` python
|
| 24 |
+
from haystack.nodes.llm import PromptNode
|
| 25 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base")
|
| 26 |
+
pn("What is the capital of Germany?")
|
| 27 |
+
|
| 28 |
+
----------------------------------------------------------------------------
|
| 29 |
+
['berlin']
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
# Motivation
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
- The use of large language models (LLMs) has become increasingly popular in natural language
|
| 36 |
+
processing (NLP) due to their ability to capture complex and nuanced patterns in language.
|
| 37 |
+
PromptNode allows users to leverage the power of LLMs in the Haystack ecosystem, and
|
| 38 |
+
to perform multiple NLP tasks using a single, unified model. This provides a flexible and efficient
|
| 39 |
+
tool for NLP in Haystack, and can enable users to improve the performance and reliability of their applications.
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
- Modern LLM support hundreds if not thousands of tasks. Aside from PromptNode we'll define prompt templates for
|
| 43 |
+
dozen or so most popular NLP tasks and allow users to register prompt templates for additional tasks. The
|
| 44 |
+
extensible and modular approach would allow users to extend the capabilities of the model and to use
|
| 45 |
+
it for a wider range of NLP tasks within the Haystack ecosystem. Prompt engineers would define templates
|
| 46 |
+
for each NLP task and register them with the PromptNode. The burden of defining the best templates for each task
|
| 47 |
+
would be on the prompt engineers and not on the users.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
- The use of templates to define NLP tasks can make it easier for users to use PromptNode, as
|
| 51 |
+
they do not need to know the details of how the model works or how to define tasks for it. This can
|
| 52 |
+
reduce the learning curve and make it easier for users to get started with PromptNode and
|
| 53 |
+
to leverage the power of LLMs in Haystack.
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
- The extensible and modular approach of PromptNode allows users to easily add support for
|
| 57 |
+
additional templates, even on-the-fly, which can enable them to extend the capabilities of the model and to use it for
|
| 58 |
+
a wider range of NLP tasks. This can provide users with more flexibility and control over the model,
|
| 59 |
+
and can enable them to tailor it to their specific needs and applications.
|
| 60 |
+
|
| 61 |
+
# Detailed design
|
| 62 |
+
|
| 63 |
+
- The PromptNode class is the most important abstraction in Haystack's large language model (LLM) support.
|
| 64 |
+
In addition to PromptNode class, we'll also define a set of prompt templates for the most popular NLP tasks.
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
- NLP prompt templates will be represented by `PromptTemplate` class.
|
| 68 |
+
|
| 69 |
+
``` python
|
| 70 |
+
class PromptTemplate(BaseTemplate):
|
| 71 |
+
|
| 72 |
+
name: str
|
| 73 |
+
prompt_text: str
|
| 74 |
+
input_variables: List[str]
|
| 75 |
+
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
PromptNode would, out-of-the-box, support 10-20 default NLP tasks defined by PromptTemplate instances. However, it would
|
| 79 |
+
allow registering additional templates with PromptNode.
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
- The prompt templates for default tasks (question-answering,question-generation, summarization etc.) could be examined by the user
|
| 83 |
+
using `get_prompt_templates_names` class method of the PromptNode. For example:
|
| 84 |
+
|
| 85 |
+
``` python
|
| 86 |
+
from haystack.nodes.llm import PromptNode
|
| 87 |
+
PromptNode.get_prompt_templates_names()
|
| 88 |
+
|
| 89 |
+
----------------------------------------------------------------------------
|
| 90 |
+
['question-answering',
|
| 91 |
+
'question-generation',
|
| 92 |
+
'conditioned-question-generation',
|
| 93 |
+
'summarization',
|
| 94 |
+
'question-answering-check']
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
- PromptNode supports natural language prompting (using `prompt` method) by specifying prompt template method parameter. For example:
|
| 99 |
+
|
| 100 |
+
``` python
|
| 101 |
+
from haystack.nodes.llm import PromptNode
|
| 102 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base")
|
| 103 |
+
pn.prompt("question-generation", documents=["Berlin is the capital of Germany."])
|
| 104 |
+
|
| 105 |
+
----------------------------------------------------------------------------
|
| 106 |
+
['What is the capital of Germany?']
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
- PromptNode supports selecting a particular default template for a certain task (e.g. question-generation) and then subsequently
|
| 110 |
+
using the selected template until user changes the current template. For example:
|
| 111 |
+
|
| 112 |
+
``` python
|
| 113 |
+
qa = pn.use_prompt_template("deepset/question-generation-v2")
|
| 114 |
+
qa(documents=["Berlin is the capital of Germany."])
|
| 115 |
+
|
| 116 |
+
----------------------------------------------------------------------------
|
| 117 |
+
['What is the capital of Germany?']
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
- The addition of new prompt templates is supported by the `add_prompt_template` method. For example:
|
| 121 |
+
|
| 122 |
+
``` python
|
| 123 |
+
from haystack.nodes.llm import PromptNode
|
| 124 |
+
PromptNode.add_prompt_template(PromptTemplate(name="sentiment-analysis",
|
| 125 |
+
prompt_text="Please give a sentiment for this context. Answer with positive, "
|
| 126 |
+
"negative or neutral. Context: $documents; Answer:",
|
| 127 |
+
input_variables=["documents"]))
|
| 128 |
+
PromptNode.get_prompt_templates_names()
|
| 129 |
+
|
| 130 |
+
----------------------------------------------------------------------------
|
| 131 |
+
['question-answering',
|
| 132 |
+
'question-generation',
|
| 133 |
+
'conditioned-question-generation',
|
| 134 |
+
'summarization',
|
| 135 |
+
'question-answering-check',
|
| 136 |
+
'sentiment-analysis']
|
| 137 |
+
```
|
| 138 |
+
|
| 139 |
+
- Users can inspect registered prompt templates with two class methods: `get_prompt_templates_names` and `get_prompt_templates`. The first
|
| 140 |
+
method, as we have seen, simply lists the names of the supported templates while the second method returns the list of `PromptTemplate`
|
| 141 |
+
instances, in readable format, allowing users to inspect the actual prompt template used and the templates input parameters.
|
| 142 |
+
|
| 143 |
+
``` python
|
| 144 |
+
from haystack.nodes.llm import PromptNode
|
| 145 |
+
PromptNode.get_prompt_templates()
|
| 146 |
+
|
| 147 |
+
----------------------------------------------------------------------------
|
| 148 |
+
[PromptTemplate(name="sentiment-analysis",
|
| 149 |
+
prompt_text="Please give a sentiment for this context. Answer with positive, "
|
| 150 |
+
"negative or neutral. Context: $documents; Answer:",
|
| 151 |
+
input_variables=["documents"], ...]
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
- However, aside from existing templates, users should also be able to use "on-the-fly" templates without registering them first. For example:
|
| 157 |
+
|
| 158 |
+
``` python
|
| 159 |
+
from haystack.nodes.llm import PromptNode
|
| 160 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base")
|
| 161 |
+
prompt_template = PromptTemplate(name="sentiment-analysis",
|
| 162 |
+
prompt_text="Please give a sentiment for this context. "
|
| 163 |
+
"Answer with positive, negative or neutral. Context: $documents; Answer:",
|
| 164 |
+
input_variables=["documents"])
|
| 165 |
+
pn.prompt(prompt_template, documents=["I really enjoyed the recent movie."])
|
| 166 |
+
|
| 167 |
+
----------------------------------------------------------------------------
|
| 168 |
+
['positive']
|
| 169 |
+
```
|
| 170 |
+
This, "on-the-fly" approach might be handy if users want to simply try stuff out
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
- Therefore, the most central API method of the PromptNode class would be the `prompt` method with the following signature:
|
| 174 |
+
``` python
|
| 175 |
+
def prompt(self, prompt_template: Union[str, PromptTemplate] = None, *args, **kwargs) -> List[str]:
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
- PromptNode class `__init__` constructor, aside from the `model_name_or_path` parameter would also have a
|
| 180 |
+
`prompt_template` parameter which would serve as the current and default template of the PromptNode.
|
| 181 |
+
|
| 182 |
+
- ``` python
|
| 183 |
+
def __init__(self, model_name_or_path: str = "google/flan-t5-base", prompt_template: Union[str, PromptTemplate] = None):
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
If the `prompt_template` is not specified in the `PromptNode` init method then user is required to specify the
|
| 187 |
+
template in the prompt method:
|
| 188 |
+
|
| 189 |
+
``` python
|
| 190 |
+
from haystack.nodes.llm import PromptNode
|
| 191 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base")
|
| 192 |
+
pn.prompt("question-generation", documents=["Berlin is the capital of Germany."])
|
| 193 |
+
|
| 194 |
+
----------------------------------------------------------------------------
|
| 195 |
+
['What is the capital of Germany?']
|
| 196 |
+
```
|
| 197 |
+
|
| 198 |
+
Otherwise, when the `PromptNode` is initialized with a prompt template user can invoke the `PromptNode` directly
|
| 199 |
+
|
| 200 |
+
``` python
|
| 201 |
+
from haystack.nodes.llm import PromptNode
|
| 202 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base", prompt_template="question-generation")
|
| 203 |
+
pn(documents=["Berlin is the capital of Germany."])
|
| 204 |
+
|
| 205 |
+
----------------------------------------------------------------------------
|
| 206 |
+
['What is the capital of Germany?']
|
| 207 |
+
```
|
| 208 |
+
|
| 209 |
+
- Template parameters verification
|
| 210 |
+
|
| 211 |
+
All template input parameters will be verified to match the template definition and the corresponding runtime
|
| 212 |
+
parameters for the input variables will be checked for type and value. For example:
|
| 213 |
+
|
| 214 |
+
``` python
|
| 215 |
+
from haystack.nodes.llm import PromptNode
|
| 216 |
+
on = PromptNode(model_name_or_path="google/flan-t5-base")
|
| 217 |
+
on.prompt("question-generation", some_unknown_param=["Berlin is the capital of Germany."])
|
| 218 |
+
|
| 219 |
+
----------------------------------------------------------------------------
|
| 220 |
+
ValueError Traceback (most recent call last)
|
| 221 |
+
<ipython-input-16-369cca52e960> in <module>
|
| 222 |
+
1 # tasks parameters are checked
|
| 223 |
+
----> 2 sa(some_param=[Document("Berlin is the capital of Germany.")])
|
| 224 |
+
|
| 225 |
+
2 frames
|
| 226 |
+
/usr/local/lib/python3.8/dist-packages/haystack/nodes/llm/multi_task.py in __call__(self, *args, **kwargs)
|
| 227 |
+
34 if set(template_dict.keys()) != set(self.input_variables):
|
| 228 |
+
35 available_params = set(list(template_dict.keys()) + list(set(kwargs.keys())))
|
| 229 |
+
---> 36 raise ValueError(f"Expected prompt params {self.input_variables} but got {list(available_params)}")
|
| 230 |
+
37
|
| 231 |
+
38 template_dict["prompt_template"] = self.prompt_text
|
| 232 |
+
|
| 233 |
+
ValueError: Expected prompt params ['documents'] but got ['some_unknown_param']
|
| 234 |
+
```
|
| 235 |
+
|
| 236 |
+
- Pipelines
|
| 237 |
+
|
| 238 |
+
Even though we can use PromptNode directly its real power lies in using pipelines and Haystack. For example, we
|
| 239 |
+
can retrieve documents from the document store using the query and then inject the retrieved documents into documents
|
| 240 |
+
as a parameter to the selected PromptNode template. For example:
|
| 241 |
+
|
| 242 |
+
``` python
|
| 243 |
+
from haystack.pipelines import PromptNode
|
| 244 |
+
top_k = 3
|
| 245 |
+
query = "Who are the parents of Arya Stark?"
|
| 246 |
+
retriever = EmbeddingRetriever(...)
|
| 247 |
+
pn = PromptNode(model_name_or_path="google/flan-t5-base", prompt_template="question-answering")
|
| 248 |
+
|
| 249 |
+
pipe = Pipeline()
|
| 250 |
+
pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
|
| 251 |
+
pipe.add_node(component=pn, name="prompt_node", inputs=["Retriever"])
|
| 252 |
+
|
| 253 |
+
output = pipe.run(query=query,
|
| 254 |
+
params={"Retriever": {"top_k": top_k}},
|
| 255 |
+
questions=[query for n in range(0, top_k)],
|
| 256 |
+
#documents parameter we need for this task will be automatically populated by the retriever
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
output["results"]
|
| 260 |
+
```
|
| 261 |
+
|
| 262 |
+
- However, we are still not utilizing the full power of Haystack pipelines. What if we could use more than
|
| 263 |
+
one PromptNode in the pipeline? Perhaps we could first retrieve documents from the retriever, pass them
|
| 264 |
+
to the first PromptNode that will generate questions from these documents, and then add a
|
| 265 |
+
second PromptNode component that will answer those generated questions given the documents as the
|
| 266 |
+
context. Here is how we can do exactly that:
|
| 267 |
+
|
| 268 |
+
``` python
|
| 269 |
+
top_k = 3
|
| 270 |
+
query = "Who are the parents of Arya Stark?"
|
| 271 |
+
retriever = EmbeddingRetriever(...)
|
| 272 |
+
model = PromptModel(model_name_or_path="google/flan-t5-small")
|
| 273 |
+
|
| 274 |
+
qg = PromptNode(prompt_template="question-generation", prompt_model=model, output_variable="questions")
|
| 275 |
+
qa = PromptNode(prompt_template="question-answering", prompt_model=model)
|
| 276 |
+
|
| 277 |
+
pipe = Pipeline()
|
| 278 |
+
pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
|
| 279 |
+
pipe.add_node(component=qg, name="qg", inputs=["Retriever"])
|
| 280 |
+
pipe.add_node(component=qa, name="qa", inputs=["qg"])
|
| 281 |
+
|
| 282 |
+
result = pipe.run(query=query)
|
| 283 |
+
|
| 284 |
+
print(result["results"])
|
| 285 |
+
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
The above example shows how we can use PromptNode in a pipeline and how we can bind the results of one
|
| 289 |
+
PromptNode to the input of another PromptNode. The `output_variable` parameter used in the constructor of the
|
| 290 |
+
question generation node, with the assigned value `questions`, indicates that the variable `questions` can be resolved by any
|
| 291 |
+
downstream pipeline node. In this particular case, the output of the question generation node will be passed to downstream
|
| 292 |
+
nodes, and answering node will use that `questions` variable to inject its stored value into the `questions` parameter of
|
| 293 |
+
the question answering template.
|
| 294 |
+
|
| 295 |
+
A careful reader of this proposal will also notice that we are using the same PromptModel instance for both PromptNodes.
|
| 296 |
+
This is done mainly for reuse as the PromptModel instance could be a locally run LLM and we don't want to load it
|
| 297 |
+
multiple times.
|
| 298 |
+
|
| 299 |
+
As LLMs are very resource intensive we can also envision a scenario where we would like to use a remote LLM service.
|
| 300 |
+
In such cases we can use multiple instances of a PromptNode in a pipeline directly thus bypassing PromptModel altogether.
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
- Pipeline YAML config file
|
| 304 |
+
|
| 305 |
+
Let's recreate the above pipeline using a YAML config file and a declarative way of defining a pipeline.
|
| 306 |
+
|
| 307 |
+
```yaml
|
| 308 |
+
|
| 309 |
+
components:
|
| 310 |
+
|
| 311 |
+
# can go in pipeline
|
| 312 |
+
- name: prompt_node
|
| 313 |
+
params:
|
| 314 |
+
prompt_template: template
|
| 315 |
+
model_name_or_path: model
|
| 316 |
+
output_variable: "questions"
|
| 317 |
+
type: PromptNode
|
| 318 |
+
|
| 319 |
+
# can go in pipeline
|
| 320 |
+
- name: prompt_node_2
|
| 321 |
+
params:
|
| 322 |
+
prompt_template: "question-answering"
|
| 323 |
+
model_name_or_path: deepset/model-name
|
| 324 |
+
type: PromptNode
|
| 325 |
+
|
| 326 |
+
# not in pipeline - only needed if you're reusing the model across multiple PromptNode in a pipeline
|
| 327 |
+
# and hidden from users in the Python beginner world
|
| 328 |
+
- name: model
|
| 329 |
+
params:
|
| 330 |
+
model_name_or_path: google/flan-t5-xl
|
| 331 |
+
type: PromptModel
|
| 332 |
+
|
| 333 |
+
# not in pipeline
|
| 334 |
+
- name: template
|
| 335 |
+
params:
|
| 336 |
+
name: "question-generation-v2"
|
| 337 |
+
prompt_text: "Given the following $documents, please generate a question. Question:"
|
| 338 |
+
input_variables: documents
|
| 339 |
+
type: PromptTemplate
|
| 340 |
+
|
| 341 |
+
pipelines:
|
| 342 |
+
- name: question-generation-answering-pipeline
|
| 343 |
+
nodes:
|
| 344 |
+
- name: EmbeddingRetriever
|
| 345 |
+
inputs: [Query]
|
| 346 |
+
- name: prompt_node
|
| 347 |
+
inputs: [EmbeddingRetriever]
|
| 348 |
+
- name: prompt_node_2
|
| 349 |
+
inputs: [prompt_node]
|
| 350 |
+
```
|
| 351 |
+
First of all, notice how we reuse the resource heavy PromptModel instance across multiple PromptNode instances. And
|
| 352 |
+
although we could have used the already registered `question-generation` prompt template, we decided to define a new one
|
| 353 |
+
called `question-generation-v2` and as such set it as the default template for the first PromptNode. We also defined
|
| 354 |
+
the output of the first PromptNode as `questions` and used that variable in the second PromptNode.
|
| 355 |
+
|
| 356 |
+
In conclusion, we can see that the YAML config file is a mirror image of the previous code centric pipeline
|
| 357 |
+
example and also a very powerful way of defining a pipeline.
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
- Default tasks/prompts to be added to PromptNode
|
| 362 |
+
|
| 363 |
+
[Muffin]:
|
| 364 |
+
- Summarization
|
| 365 |
+
- Natural Language Inference
|
| 366 |
+
- Multiple-Choice QA
|
| 367 |
+
- Translation
|
| 368 |
+
- Sentiment Analysis
|
| 369 |
+
- Extractive QA
|
| 370 |
+
- Structured Data to Text
|
| 371 |
+
- Coreference Resolution
|
| 372 |
+
- Code Repair
|
| 373 |
+
- Code Error Generation
|
| 374 |
+
- Dialogue Context Generation
|
| 375 |
+
- Closed-Book QA
|
| 376 |
+
- Next Sentence Prediction
|
| 377 |
+
- Paraphrasing Identification
|
| 378 |
+
- Conversational Question Answering
|
| 379 |
+
- Topic Classification
|
| 380 |
+
- Mathematical QA
|
| 381 |
+
- Dialog Next Turn Prediction
|
| 382 |
+
- Grammatical Acceptability
|
| 383 |
+
- Punctuation fixing
|
| 384 |
+
|
| 385 |
+
[T0-SF]:
|
| 386 |
+
- Adversarial QA
|
| 387 |
+
- Question Generation
|
| 388 |
+
- Commonsense Reasoning
|
| 389 |
+
- Title Generation
|
| 390 |
+
- Dialogue Turn Prediction
|
| 391 |
+
- Predict Span Indices
|
| 392 |
+
- Context Generation
|
| 393 |
+
|
| 394 |
+
[NIV2]:
|
| 395 |
+
- Program Execution
|
| 396 |
+
- Text Matching
|
| 397 |
+
- Toxic Language Detection
|
| 398 |
+
- Cause Effect Classification
|
| 399 |
+
- Information Extraction
|
| 400 |
+
- Textual Entailment
|
| 401 |
+
- Wrong Candidate Generation
|
| 402 |
+
- Named Entity Recognition
|
| 403 |
+
- Commonsense Classification
|
| 404 |
+
- Fill-in-the-blank
|
| 405 |
+
- Text Completion
|
| 406 |
+
- Sentence Composition
|
| 407 |
+
- Question Understanding
|
| 408 |
+
|
| 409 |
+
[CoT Reasoning]:
|
| 410 |
+
- Explanation Generation
|
| 411 |
+
- Generate Question And Answer
|
| 412 |
+
- Grade School Math Word Problems
|
| 413 |
+
- Algebraic Question Answering
|
| 414 |
+
- Common Sense Reasoning Over Entities
|
| 415 |
+
- Common Sense Reasoning For QA
|
| 416 |
+
- Passage Based Question Answering
|
| 417 |
+
- Sense-Making And Explanation
|
| 418 |
+
|
| 419 |
+
# Drawbacks
|
| 420 |
+
- One potential drawback of PromptNode is that it may require a significant amount of computational resources
|
| 421 |
+
to use. This may limit its use in applications or environments where there are constraints on the available hardware
|
| 422 |
+
or software resources.
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
- Due to current pipeline design limitations PromptTemplate has to be a subclass of BaseComponent. This might slightly
|
| 426 |
+
confuse some users who are already familiar with Haystack components. We will mitigate this issue in subsequent releases
|
| 427 |
+
as we refactor the pipeline design. All in all, PromptTemplate will be a thin class with minimal inheritance signature from some base class.
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
# Alternatives
|
| 432 |
+
|
| 433 |
+
- One alternative to PromptNode is to continue to use separate models for each NLP task in Haystack. This
|
| 434 |
+
can enable users to tailor the model to the specific requirements of each task, and to potentially improve the
|
| 435 |
+
performance of the model for that task by additional fine-tuning or model adaptation via GPL. However, using separate
|
| 436 |
+
models may require these complex and computationally intensive training and deployment processes, and may not be as
|
| 437 |
+
efficient or flexible as using a single, unified model.
|
| 438 |
+
|
| 439 |
+
# Adoption strategy
|
| 440 |
+
- This is not a breaking change proposal and we should implement it immediately.
|
| 441 |
+
|
| 442 |
+
# How do we teach this?
|
| 443 |
+
- This change would require change in documentation.
|
| 444 |
+
- We can provide examples of how to use PromptNode in Haystack pipelines via tutorials.
|
| 445 |
+
- Docs and tutorials need to be updated
|
testbed/deepset-ai__haystack/proposals/text/3695-demo-repository.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Start Date: 2022-12-12
|
| 2 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3695
|
| 3 |
+
- Github Issue: (if available, link the issue containing the original request for this change)
|
| 4 |
+
|
| 5 |
+
# Summary
|
| 6 |
+
|
| 7 |
+
A new git repository is created to host NLP applications showcasing Haystack's features.
|
| 8 |
+
|
| 9 |
+
# Basic example
|
| 10 |
+
|
| 11 |
+
A git repository was already created on Github as an example:
|
| 12 |
+
|
| 13 |
+
https://github.com/deepset-ai/haystack-demos
|
| 14 |
+
|
| 15 |
+
# Motivation
|
| 16 |
+
|
| 17 |
+
NLP applications showcasing Haystack's capabilities can be an invaluable learning resource
|
| 18 |
+
for its users, but at this moment we don't fully take advantage of the only demo we have
|
| 19 |
+
as a documentation source.
|
| 20 |
+
|
| 21 |
+
This proposal aims at overcoming that limitation in two ways:
|
| 22 |
+
- Define better requirements for a demo application so that users can learn from it.
|
| 23 |
+
- Make it easier to add more demo applications showcasing Haystack.
|
| 24 |
+
|
| 25 |
+
# Detailed design
|
| 26 |
+
|
| 27 |
+
Every demo has a descriptive name that will be used as its identifier.
|
| 28 |
+
|
| 29 |
+
Every demo lives in a dedicated folder named after its identifier at the root of the repo, and
|
| 30 |
+
provides all the resources needed to understand the code, run the application locally or deploy it
|
| 31 |
+
remotely on a server.
|
| 32 |
+
|
| 33 |
+
Every demo provides a README.md file containing the following information:
|
| 34 |
+
- A brief description of the application and what its goal is.
|
| 35 |
+
- Explicit mention of which NLP use case is implemented: for example "QA", or "Document Retrieval".
|
| 36 |
+
- Detailed instructions about how to run the application locally.
|
| 37 |
+
- Any hardware requirement and the limitations when not provided (for example, a GPU device).
|
| 38 |
+
- How to modify and test the code, and how to contribute changes.
|
| 39 |
+
|
| 40 |
+
The code of a demo application should be tested whenever possible, and at least some of the
|
| 41 |
+
tests should be able to run in the repo CI system. Every demo has a dedicated Workflow defined
|
| 42 |
+
in a file named after its identifier. The workflow runs only when files in the demo folder are
|
| 43 |
+
modified.
|
| 44 |
+
|
| 45 |
+
In case the CI is needed for continuous deployment, or for building artifacts, a demo can have
|
| 46 |
+
more than one workflow file defined, named after its identifier plus a descriptive suffix, for
|
| 47 |
+
example: `my_demo_identifier.yml` for tests, `my_demo_identifier_docker.yml` for building a
|
| 48 |
+
Docker image, `my_demo_identifier_deploy.yml` for continuous delivery.
|
| 49 |
+
|
| 50 |
+
# Drawbacks
|
| 51 |
+
|
| 52 |
+
- The code of the existing demo would be removed from Haystack and potentially become harder to
|
| 53 |
+
find for existing contributors.
|
| 54 |
+
- The proposed design dictates a list of new requirements for a demo that will take time to
|
| 55 |
+
implement.
|
| 56 |
+
|
| 57 |
+
# Alternatives
|
| 58 |
+
|
| 59 |
+
- Leave things as they are
|
| 60 |
+
- Implement the design proposal to a subfolder of Haystack's git repository
|
| 61 |
+
|
| 62 |
+
# Adoption strategy
|
| 63 |
+
|
| 64 |
+
Adoption will be mostly driven by communicating the changes to the community and monitoring the
|
| 65 |
+
traffic in the new Github repository: interacting with the existing demo will not be affected
|
| 66 |
+
but accessing the code will be.
|
| 67 |
+
|
| 68 |
+
# How we teach this
|
| 69 |
+
|
| 70 |
+
- A link to the demo repository will be added to the web page of the [demo itself](https://haystack-demo.deepset.ai/).
|
| 71 |
+
- Haystack's README and documentation will mention where to find the code for the demos.
|
| 72 |
+
- [Haystack Home](https://haystack.deepset.ai) will host a whole section dedicated to Haystack demos
|
| 73 |
+
(detailing the aforementioned section is out of scope for this proposal).
|
| 74 |
+
|
| 75 |
+
# Unresolved questions
|
| 76 |
+
|
| 77 |
+
N/A.
|
testbed/deepset-ai__haystack/proposals/text/3784-shaper.md
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Title: Shaper
|
| 2 |
+
- Decision driver: Vladimir
|
| 3 |
+
- Start Date: 2022-12-29
|
| 4 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3784/
|
| 5 |
+
|
| 6 |
+
# Summary
|
| 7 |
+
|
| 8 |
+
Input/Output Shaper (Shaper) is a new pipeline component that can invoke arbitrary, registered functions, on the
|
| 9 |
+
invocation context (query, documents etc.) of a pipeline and pass the new/modified variables further down the pipeline.
|
| 10 |
+
|
| 11 |
+
# Basic example
|
| 12 |
+
|
| 13 |
+
In the following example, we'll use Shaper to add a new variable `questions` to the invocation context.
|
| 14 |
+
`questions` is a copy of query variable. This functionality of Shaper is useful when we simply want to
|
| 15 |
+
rename a variable in the invocation context e.g. in cases where the PromptNode template is expecting a variable
|
| 16 |
+
'questions' rather than 'query'.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
```python
|
| 20 |
+
|
| 21 |
+
from haystack import Pipeline, Document
|
| 22 |
+
|
| 23 |
+
with open("tmp_config.yml", "w") as tmp_file:
|
| 24 |
+
tmp_file.write(
|
| 25 |
+
f"""
|
| 26 |
+
version: ignore
|
| 27 |
+
components:
|
| 28 |
+
- name: shaper
|
| 29 |
+
params:
|
| 30 |
+
inputs:
|
| 31 |
+
query:
|
| 32 |
+
output: questions
|
| 33 |
+
type: Shaper
|
| 34 |
+
pipelines:
|
| 35 |
+
- name: query
|
| 36 |
+
nodes:
|
| 37 |
+
- name: shaper
|
| 38 |
+
inputs:
|
| 39 |
+
- Query
|
| 40 |
+
"""
|
| 41 |
+
)
|
| 42 |
+
pipeline = Pipeline.load_from_yaml(path="tmp_config.yml")
|
| 43 |
+
result = pipeline.run(
|
| 44 |
+
query="What can you tell me about Berlin?",
|
| 45 |
+
documents=[Document("Berlin is an amazing city."), Document("I love Berlin.")],
|
| 46 |
+
)
|
| 47 |
+
assert result
|
| 48 |
+
# query has been renamed to questions
|
| 49 |
+
assert isinstance(result["meta"]["invocation_context"]["questions"], str)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
# Motivation
|
| 55 |
+
|
| 56 |
+
We need Shaper to support the use cases where we want to easily add new variables to the pipeline invocation context.
|
| 57 |
+
These new variables hold values which are a result of some arbitrary function invocation on the existing variables
|
| 58 |
+
in the invocation context.
|
| 59 |
+
|
| 60 |
+
Shaper is especially useful when combined with PromptNode(s). Aside from simply renaming variables to match
|
| 61 |
+
the templates of PromptNodes, we can also use Shaper to add new variables to the invocation context. Often
|
| 62 |
+
these new variables are the result of some arbitrary function invocation on the existing variables in the
|
| 63 |
+
invocation context.
|
| 64 |
+
|
| 65 |
+
The original idea for Shaper is related to the question answering use case using PromptNode. In QA, the query string variable
|
| 66 |
+
passed to a pipeline run method needs to be expanded to a list of strings with the list size matching the size of the
|
| 67 |
+
documents list. Therefore, we can use the query as the question to pose to all the documents in the documents list.
|
| 68 |
+
|
| 69 |
+
The expected outcome of using Shaper is that we can easily add new variables to the invocation context so they can
|
| 70 |
+
match the prompt templates of PromptNodes. Multiple Shaper components can be used in a pipeline to modify the
|
| 71 |
+
invocation context as needed.
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Detailed design
|
| 75 |
+
|
| 76 |
+
The Shaper component is most often defined in pipelines YAML file. The YAML component definition consists of the
|
| 77 |
+
params block:
|
| 78 |
+
|
| 79 |
+
```yaml
|
| 80 |
+
components:
|
| 81 |
+
- name: shaper
|
| 82 |
+
params:
|
| 83 |
+
inputs:
|
| 84 |
+
query:
|
| 85 |
+
func: expand
|
| 86 |
+
output: questions
|
| 87 |
+
params:
|
| 88 |
+
expand_target: query
|
| 89 |
+
size:
|
| 90 |
+
func: len
|
| 91 |
+
params:
|
| 92 |
+
- documents
|
| 93 |
+
documents:
|
| 94 |
+
func: concat
|
| 95 |
+
params:
|
| 96 |
+
docs: documents
|
| 97 |
+
delimiter: " "
|
| 98 |
+
type: Shaper
|
| 99 |
+
pipelines:
|
| 100 |
+
- name: query
|
| 101 |
+
nodes:
|
| 102 |
+
- name: shaper
|
| 103 |
+
inputs:
|
| 104 |
+
- Query
|
| 105 |
+
```
|
| 106 |
+
|
| 107 |
+
The params block consists of the inputs list. Each item in the inputs list is a dictionary with the key being the
|
| 108 |
+
invocation context variable that we want to modify.
|
| 109 |
+
|
| 110 |
+
In the example above, we have two items in the inputs list. The first item is a dictionary with the key `query` and the
|
| 111 |
+
second item is a dictionary with the key `documents`.
|
| 112 |
+
|
| 113 |
+
For the query variable, we want to invoke the function `expand` and store the result in the variable `questions`.
|
| 114 |
+
The `expand` function takes two keyword parameters: `expand_target` and `size`. The `expand_target` parameter is the
|
| 115 |
+
name of the variable in the invocation context that we want to expand. The `size` parameter is a result of the `len`
|
| 116 |
+
function invocation on the variable `documents`.
|
| 117 |
+
|
| 118 |
+
For the documents variable, we want to invoke the function `concat` and store the result in the same variable.
|
| 119 |
+
Therefore, after the invocation, the documents variable will hold a result of `concat` function invocation while
|
| 120 |
+
we'll also have a new variable `questions` in the invocation context. The questions variable will hold a result of
|
| 121 |
+
`expand` function invocation.
|
| 122 |
+
|
| 123 |
+
The important thing to note here is that we can invoke functions with both keyword and positional parameters. Function
|
| 124 |
+
`len` is an example of a function that takes non-keyword positional parameters. The `concat` and `expand` function
|
| 125 |
+
take keyword parameters. These functions can also be invoked with positional parameters but that is not recommended.
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
### Default parameters
|
| 129 |
+
|
| 130 |
+
The Shaper component can also be configured with default parameters. Default parameters are used when we
|
| 131 |
+
don't specify the parameters for a function invocation. The default parameters are specified in the function definition.
|
| 132 |
+
|
| 133 |
+
For example, in the YAML snippet definition below, we have a function `expand` that takes two keyword parameters:
|
| 134 |
+
`expand_target` and `size`. However, we haven't specified either of these parameters in the YAML config. This is
|
| 135 |
+
possible because we assume that the first parameter is always the variable we want to invoke the function on. In this
|
| 136 |
+
case, the variable `query`. The second parameter is the `size` of the list we want to expand the variable to. Here we
|
| 137 |
+
have also defined a helper function in Shaper called `expand:size` that calculates the default value of
|
| 138 |
+
this parameter - `len(documents)`.
|
| 139 |
+
|
| 140 |
+
Therefore, the `expand` function, described below, will be invoked with the following parameters: `query`
|
| 141 |
+
and `len(documents)`
|
| 142 |
+
|
| 143 |
+
```yaml
|
| 144 |
+
components:
|
| 145 |
+
- name: shaper
|
| 146 |
+
params:
|
| 147 |
+
inputs:
|
| 148 |
+
query:
|
| 149 |
+
func: expand
|
| 150 |
+
output: questions
|
| 151 |
+
type: Shaper
|
| 152 |
+
pipelines:
|
| 153 |
+
- name: query
|
| 154 |
+
nodes:
|
| 155 |
+
- name: shaper
|
| 156 |
+
inputs:
|
| 157 |
+
- Query
|
| 158 |
+
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
We need the "default parameter" functionality to support YAML files definitions that are more concise and less
|
| 162 |
+
error-prone.
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
### Omitting output parameter
|
| 166 |
+
|
| 167 |
+
The output parameter is optional. If it is omitted, the result of the function invocation will be stored in
|
| 168 |
+
the corresponding input variable. In the example below, the output of expand function will be stored in the
|
| 169 |
+
query variable.
|
| 170 |
+
|
| 171 |
+
```yaml
|
| 172 |
+
components:
|
| 173 |
+
- name: shaper
|
| 174 |
+
params:
|
| 175 |
+
inputs:
|
| 176 |
+
query:
|
| 177 |
+
func: expand
|
| 178 |
+
type: Shaper
|
| 179 |
+
pipelines:
|
| 180 |
+
- name: query
|
| 181 |
+
nodes:
|
| 182 |
+
- name: shaper
|
| 183 |
+
inputs:
|
| 184 |
+
- Query
|
| 185 |
+
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
### Order of function invocation
|
| 189 |
+
|
| 190 |
+
The order of function invocation is important. The functions are invoked in the order they are defined in the YAML.
|
| 191 |
+
In the example below, we have two input variables: `query` and `documents`. The `query` variable is expanded to a
|
| 192 |
+
list of strings and stored in the variable `questions`. The `documents` variable is then concatenated and the
|
| 193 |
+
variable `questions` is immediately used as the `num_tokens` keyword parameter to the `concat` function.
|
| 194 |
+
|
| 195 |
+
```yaml
|
| 196 |
+
components:
|
| 197 |
+
- name: shaper
|
| 198 |
+
params:
|
| 199 |
+
inputs:
|
| 200 |
+
query:
|
| 201 |
+
func: expand
|
| 202 |
+
output: questions
|
| 203 |
+
params:
|
| 204 |
+
expand_target: query
|
| 205 |
+
size:
|
| 206 |
+
func: len
|
| 207 |
+
params:
|
| 208 |
+
- documents
|
| 209 |
+
documents:
|
| 210 |
+
func: concat
|
| 211 |
+
output: documents
|
| 212 |
+
params:
|
| 213 |
+
docs: documents
|
| 214 |
+
delimiter: " "
|
| 215 |
+
num_tokens:
|
| 216 |
+
func: len
|
| 217 |
+
params:
|
| 218 |
+
- questions
|
| 219 |
+
type: Shaper
|
| 220 |
+
pipelines:
|
| 221 |
+
- name: query
|
| 222 |
+
nodes:
|
| 223 |
+
- name: shaper
|
| 224 |
+
inputs:
|
| 225 |
+
- Query
|
| 226 |
+
```
|
| 227 |
+
|
| 228 |
+
### Security
|
| 229 |
+
|
| 230 |
+
The Shaper component prevents arbitrary code execution. There should be no dangerous `exec` or `eval` Python calls. Only
|
| 231 |
+
the functions we have specified in the Shaper component are allowed to be invoked. The functions are specified in the
|
| 232 |
+
constructor using immutable data structures.
|
| 233 |
+
|
| 234 |
+
This security measure might be relaxed in the future to allow users to specify their own functions. However, this
|
| 235 |
+
change will require a more thorough security review.
|
| 236 |
+
|
| 237 |
+
# Drawbacks and other considerations
|
| 238 |
+
|
| 239 |
+
Although a "normal use" of PromptNodes would not trigger a need for Shaper there are cases where its
|
| 240 |
+
use is necessary. In cases where we can only use pipeline definitions to configure the pipeline (via YAML),
|
| 241 |
+
we need to use it.
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
- Implementation and maintenance cost should not be high.
|
| 245 |
+
- Shaper is not really useful outside of Haystack pipeline.
|
| 246 |
+
- Shaper could turn out to be useful in other use cases as well - i.e. declarative pre/post processing.
|
| 247 |
+
- Shaper integrates well with PromptNodes and other components.
|
| 248 |
+
- No breaking changes to existing components.
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# Alternatives
|
| 252 |
+
|
| 253 |
+
A better solution would likely be a more general run method for components. This would allow us to arbitrarily
|
| 254 |
+
define the pipeline invocation context. However, even in those cases we'll need to use Shaper to modify
|
| 255 |
+
existing variables in invocation context as needed.
|
| 256 |
+
|
| 257 |
+
# Adoption strategy
|
| 258 |
+
|
| 259 |
+
Haystack users can start using Shaper in their pipelines immediately. There are no breaking changes to
|
| 260 |
+
existing components or pipelines.
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
# How we teach this
|
| 264 |
+
|
| 265 |
+
We will need docs update to teach users how to use Shaper. The docs will need to explain the
|
| 266 |
+
motivation for Shaper using PromptNode examples. We also need to show the usage via tutorials.
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# Unresolved questions
|
| 270 |
+
|
| 271 |
+
Optional, but suggested for first drafts. What parts of the design are still
|
| 272 |
+
TBD?
|
testbed/deepset-ai__haystack/proposals/text/3875-table-cell.md
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Title: TableCell Dataclass
|
| 2 |
+
- Decision driver: Sebastian Lee
|
| 3 |
+
- Start Date: 2023-01-17
|
| 4 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3875
|
| 5 |
+
- Github Issue: https://github.com/deepset-ai/haystack/issues/3616
|
| 6 |
+
|
| 7 |
+
# Summary
|
| 8 |
+
|
| 9 |
+
When returning answers for a TableQA pipeline we would like to return the column and row index as the answer location
|
| 10 |
+
within the table, since the table is returned as a list of lists in Haystack.
|
| 11 |
+
This would allow users to easily look up the answer in the returned table to fetch the text directly from the table,
|
| 12 |
+
identify the row or column labels for that answer, or generally perform operations on the table near or around the
|
| 13 |
+
answer cell.
|
| 14 |
+
|
| 15 |
+
# Basic Example
|
| 16 |
+
|
| 17 |
+
When applicable, write a snippet of code showing how the new feature would be used.
|
| 18 |
+
```python
|
| 19 |
+
import pandas as pd
|
| 20 |
+
from haystack.nodes import TableReader
|
| 21 |
+
from haystack import Document
|
| 22 |
+
|
| 23 |
+
data = {
|
| 24 |
+
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
|
| 25 |
+
"age": ["58", "47", "60"],
|
| 26 |
+
"number of movies": ["87", "53", "69"],
|
| 27 |
+
"date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
|
| 28 |
+
}
|
| 29 |
+
table_doc = Document(content=pd.DataFrame(data), content_type="table")
|
| 30 |
+
reader = TableReader(model_name_or_path="google/tapas-base-finetuned-wtq", max_seq_len=128)
|
| 31 |
+
prediction = reader.predict(query="Who was in the most number of movies?", documents=[table_doc])
|
| 32 |
+
answer = prediction["answers"][0]
|
| 33 |
+
|
| 34 |
+
# New feature
|
| 35 |
+
# answer.context -> [["actor", "age", "number of movies"], ["Brad Pitt",...], [...]]
|
| 36 |
+
# answer.offsets_in_context[0] -> (row=1, col=1)
|
| 37 |
+
print(answer.context[answer.offsets_in_context[0].row][answer.offsets_in_context[0].col])
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
# Motivation
|
| 41 |
+
|
| 42 |
+
## Why do we need this feature?
|
| 43 |
+
To allow users to easily look up the answer cell in the returned table to fetch the answer text
|
| 44 |
+
directly from the table, identify the row or column labels for that answer, or generally perform operations on the table
|
| 45 |
+
near or around the answer cell.
|
| 46 |
+
|
| 47 |
+
Currently, we return the location of the answer in the **linearized** version of the table, so we can use the
|
| 48 |
+
`Span` dataclass. The `Span` dataclass is reproduced below:
|
| 49 |
+
```python
|
| 50 |
+
@dataclass
|
| 51 |
+
class Span:
|
| 52 |
+
start: int
|
| 53 |
+
end: int
|
| 54 |
+
"""
|
| 55 |
+
Defining a sequence of characters (Text span) or cells (Table span) via start and end index.
|
| 56 |
+
For extractive QA: Character where answer starts/ends
|
| 57 |
+
For TableQA: Cell where the answer starts/ends (counted from top left to bottom right of table)
|
| 58 |
+
|
| 59 |
+
:param start: Position where the span starts
|
| 60 |
+
:param end: Position where the span ends
|
| 61 |
+
"""
|
| 62 |
+
```
|
| 63 |
+
This is inconvenient for users because they would need to know how the table is linearized (column major or row major)
|
| 64 |
+
so they could reconstruct the column and row indices of the answer before they could locate the answer cell in the table.
|
| 65 |
+
|
| 66 |
+
## What use cases does it support?
|
| 67 |
+
Some examples are already stated above but to recap, to easily perform operations on the table near or around the answer cell.
|
| 68 |
+
|
| 69 |
+
## What's the expected outcome?
|
| 70 |
+
The addition of a new dataclass called `TableCell` that would look like
|
| 71 |
+
```python
|
| 72 |
+
@dataclass
|
| 73 |
+
class TableCell:
|
| 74 |
+
row: int
|
| 75 |
+
col: int
|
| 76 |
+
"""
|
| 77 |
+
Defining a table cell via the row and column index.
|
| 78 |
+
|
| 79 |
+
:param row: Row index of the cell
|
| 80 |
+
:param col: Column index of the cell
|
| 81 |
+
"""
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
# Detailed design
|
| 85 |
+
|
| 86 |
+
**New terminology:** `TableCell`, the new name for the dataclass to
|
| 87 |
+
store the column and row index of the answer cell.
|
| 88 |
+
|
| 89 |
+
**Basic Example:** [Above Basic Example](#basic-example)
|
| 90 |
+
|
| 91 |
+
## Code changes
|
| 92 |
+
- Addition of `TableCell` dataclass to https://github.com/deepset-ai/haystack/blob/main/haystack/schema.py
|
| 93 |
+
```python
|
| 94 |
+
@dataclass
|
| 95 |
+
class TableCell:
|
| 96 |
+
row: int
|
| 97 |
+
col: int
|
| 98 |
+
"""
|
| 99 |
+
Defining a table cell via the row and column index.
|
| 100 |
+
|
| 101 |
+
:param row: Row index of the cell
|
| 102 |
+
:param col: Column index of the cell
|
| 103 |
+
"""
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
- Updating code (e.g. schema objects, classes, functions) that use `Span` to also support `TableCell` where appropriate.
|
| 107 |
+
This includes:
|
| 108 |
+
- Updating the `Answer` dataclass to support `TableCell` as a valid type for `offsets_in_document` and `offsets_in_context`
|
| 109 |
+
```python
|
| 110 |
+
@dataclass
|
| 111 |
+
class Answer:
|
| 112 |
+
answer: str
|
| 113 |
+
type: Literal["generative", "extractive", "other"] = "extractive"
|
| 114 |
+
score: Optional[float] = None
|
| 115 |
+
context: Optional[Union[str, List[List]]] = None
|
| 116 |
+
    offsets_in_document: Optional[Union[List[Span], List[TableCell]]] = None
|
| 117 |
+
    offsets_in_context: Optional[Union[List[Span], List[TableCell]]] = None
|
| 118 |
+
document_id: Optional[str] = None
|
| 119 |
+
meta: Optional[Dict[str, Any]] = None
|
| 120 |
+
```
|
| 121 |
+
- Similar to how we can return a list of `Span`s, we would allow a list of `TableCell`s to be returned to handle the case
|
| 122 |
+
when multiple `TableCell`s are returned to form a final answer.
|
| 123 |
+
- Updating any functions that accept table answers as input to use the new `col` and `row` variables instead of `start` and `end` variables.
|
| 124 |
+
This type of check for table answers is most likely already done by checking if the `context` is of type `pd.DataFrame`.
|
| 125 |
+
- `TableReader` and `RCIReader` to return `TableCell` objects instead of `Span`.
|
| 126 |
+
|
| 127 |
+
Changes related to the Edge Case/Bug below
|
| 128 |
+
- Update `Document.content` and `Answer.context` to use `List[List]` instead of `pd.DataFrame`.
|
| 129 |
+
- Update `TableReader` nodes to convert table from `List[List]` into `pd.DataFrame` before inputting to the model.
|
| 130 |
+
|
| 131 |
+
## Edge Case/Bug
|
| 132 |
+
Internally, Haystack stores a table as a pandas DataFrame in the `Answer` dataclass, which does not treat the column
|
| 133 |
+
labels as the first row in the table.
|
| 134 |
+
However, in Haystack's rest-api the table is converted into a list of lists format where the column labels are
|
| 135 |
+
stored as the first row, which can be seen [here](https://github.com/deepset-ai/haystack/pull/3872), which is consistent
|
| 136 |
+
with the `Document.to_dict()` method seen [here](https://github.com/deepset-ai/haystack/blob/6af4f14fe0d375a1ae0ced18930a9239401231c7/haystack/schema.py#L164-L165).
|
| 137 |
+
|
| 138 |
+
This means that the current `Span` and (new) `TableCell` dataclass point to the wrong location when the table is
|
| 139 |
+
converted to a list of lists.
|
| 140 |
+
|
| 141 |
+
For example, the following code
|
| 142 |
+
```python
|
| 143 |
+
import pandas as pd
|
| 144 |
+
from haystack import Document
|
| 145 |
+
|
| 146 |
+
data = {
|
| 147 |
+
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
|
| 148 |
+
"age": ["58", "47", "60"],
|
| 149 |
+
"number of movies": ["87", "53", "69"],
|
| 150 |
+
"date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
|
| 151 |
+
}
|
| 152 |
+
table_doc = Document(content=pd.DataFrame(data), content_type="table")
|
| 153 |
+
span = (0, 0)
|
| 154 |
+
print(table_doc.content.iloc[span]) # prints "brad pitt"
|
| 155 |
+
|
| 156 |
+
dict_table_doc = table_doc.to_dict()
|
| 157 |
+
print(dict_table_doc["content"][span[0]][span[1]]) # prints "actors"
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
We have decided to store the table internally as a list of lists to avoid this issue. See discussion starting [here](https://github.com/deepset-ai/haystack/pull/3875#discussion_r1088766318).
|
| 161 |
+
|
| 162 |
+
# Drawbacks
|
| 163 |
+
|
| 164 |
+
Look at the feature from the other side: what are the reasons why we should _not_ work on it? Consider the following:
|
| 165 |
+
|
| 166 |
+
- What's the implementation cost, both in terms of code size and complexity?
|
| 167 |
+
|
| 168 |
+
I don't believe this will require too much code change since we already check for Table like answers by checking if the
|
| 169 |
+
returned context is of type string or pandas DataFrame.
|
| 170 |
+
|
| 171 |
+
- Can the solution you're proposing be implemented as a separate package, outside of Haystack?
|
| 172 |
+
|
| 173 |
+
Technically yes, but since it affects core classes like `TableReader`, and `RCIReader` it makes sense to implement in
|
| 174 |
+
Haystack.
|
| 175 |
+
|
| 176 |
+
- Does it teach people more about Haystack?
|
| 177 |
+
|
| 178 |
+
It would update already existing documentation and tutorials of Haystack.
|
| 179 |
+
|
| 180 |
+
- How does this feature integrate with other existing and planned features?
|
| 181 |
+
|
| 182 |
+
This feature directly integrates and impacts the TableQA feature of Haystack.
|
| 183 |
+
|
| 184 |
+
- What's the cost of migrating existing Haystack pipelines (is it a breaking change?)?
|
| 185 |
+
|
| 186 |
+
Yes there are breaking changes that would affect end users.
|
| 187 |
+
1. The way to access the offsets in returned Answers would be different.
|
| 188 |
+
Following the deprecation policy we will support both `Span` and `TableCell` (can be toggled between using a boolean flag)
|
| 189 |
+
for 2 additional versions of Haystack.
|
| 190 |
+
2. Tables in Haystack Documents and Answers will change from type pandas Dataframe to a list of lists.
|
| 191 |
+
|
| 192 |
+
# Alternatives
|
| 193 |
+
|
| 194 |
+
## What's the impact of not adding this feature?
|
| 195 |
+
Requiring users to figure out how to interpret the linearized answer cell coordinates to reconstruct the row and column indices
|
| 196 |
+
to be able to access the answer cell in the returned table.
|
| 197 |
+
|
| 198 |
+
## Other designs
|
| 199 |
+
1. Expand `Span` dataclass to have optional `col` and `row` fields. This would require a similar check as `TableCell`, but instead
|
| 200 |
+
require checking for which of the elements are populated, which seems unnecessarily complex.
|
| 201 |
+
```python
|
| 202 |
+
@dataclass
|
| 203 |
+
class Span:
|
| 204 |
+
start: int = None
|
| 205 |
+
end: int = None
|
| 206 |
+
col: int = None
|
| 207 |
+
row: int = None
|
| 208 |
+
```
|
| 209 |
+
2. Use the existing `Span` dataclass and put the row index and column index as the `start` and `end` respectively.
|
| 210 |
+
This may be confusing to users since it is not obvious that `start` should refer to `row` and `end` should refer to `column`.
|
| 211 |
+
```python
|
| 212 |
+
answer_cell_offset = Span(start=row_idx, end=col_idx)
|
| 213 |
+
```
|
| 214 |
+
3. Provide a convenience function shown [here](https://github.com/deepset-ai/haystack/issues/3616#issuecomment-1361300067)
|
| 215 |
+
to help users convert the linearized `Span` back to row and column indices. I believe this solution is non-ideal since it would
|
| 216 |
+
require a user of the rest_api to access a python function to convert the linearized indices back into row and column indices.
|
| 217 |
+
|
| 218 |
+
# Adoption strategy
|
| 219 |
+
|
| 220 |
+
## How will the existing Haystack users adopt it?
|
| 221 |
+
Haystack users would immediately experience this change once they update their installation of Haystack if they were using
|
| 222 |
+
the TableQA reader. This would be a breaking change since it would change the `offsets_in_document` and
|
| 223 |
+
`offsets_in_context` in the returned `Answer`. I'm not sure if there would be a straightforward way to write a migration
|
| 224 |
+
script for this change.
|
| 225 |
+
|
| 226 |
+
# How we teach this
|
| 227 |
+
|
| 228 |
+
Would implementing this feature mean the documentation must be re-organized
|
| 229 |
+
or updated? Does it change how Haystack is taught to new developers at any level?
|
| 230 |
+
|
| 231 |
+
- The API docs for `TableCell` would need to be added.
|
| 232 |
+
- The documentation page for [Table Question Answering](https://docs.haystack.deepset.ai/docs/table_qa) would need to be updated.
|
| 233 |
+
- Update the [TableQA tutorial](https://github.com/deepset-ai/haystack-tutorials/blob/main/tutorials/15_TableQA.ipynb)
|
| 234 |
+
to reflect that the `Span` is no longer linearized.
|
| 235 |
+
|
| 236 |
+
# Unresolved questions
|
| 237 |
+
|
| 238 |
+
No more unresolved questions.
|
testbed/deepset-ai__haystack/proposals/text/3925-mrkl-agent.md
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Title: MRKLAgent
|
| 2 |
+
- Decision driver: @julian-risch (in close collaboration with @vblagoje )
|
| 3 |
+
- Start Date: 2023-01-27
|
| 4 |
+
- Proposal PR: https://github.com/deepset-ai/haystack/pull/3925
|
| 5 |
+
- Github Issue or Discussion: https://github.com/deepset-ai/haystack/issues/3753
|
| 6 |
+
|
| 7 |
+
# Summary
|
| 8 |
+
The Agent class answers queries by choosing between different tools, which are implemented as pipelines or nodes.
|
| 9 |
+
It uses a large language model (LLM) to generate a thought based on the query, choose a tool, and generate the input for the tool.
|
| 10 |
+
Based on the result returned by an action/tool (used interchangeably), the Agent has two options.
|
| 11 |
+
It can either stop if it knows the answer now or repeat the process of 1) thought, 2) action choice, 3) action input.
|
| 12 |
+
|
| 13 |
+
The Agent can be used for questions containing multiple subquestions that can be answered step-by-step (Multihop QA).
|
| 14 |
+
Combined with tools like the PythonRuntime or SerpAPIComponent we imagine for Haystack, the Agent can query the web and do calculations.
|
| 15 |
+
|
| 16 |
+
We have a [notebook](https://colab.research.google.com/drive/1oJf4gxkokIazLN26NNt6XsewMvCmVrz8?usp=sharing) that demonstrates how to use an Agent with two tools: PythonRuntime and SerpAPIComponent. It requires API keys for OpenAI and SerpAPI.
|
| 17 |
+
The notebook is based on the branch https://github.com/deepset-ai/haystack/compare/main...mrkl-pipeline (no pull request)
|
| 18 |
+
|
| 19 |
+
# Basic example
|
| 20 |
+
|
| 21 |
+
An example of an Agent could use two tools: a web search engine and a calculator.
|
| 22 |
+
|
| 23 |
+
The query "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?" can be broken down into three steps:
|
| 24 |
+
1) Searching the web for the name of Olivia Wilde's boyfriend
|
| 25 |
+
2) Searching the web for the age of that boyfriend
|
| 26 |
+
3) Calculating that age raised to the 0.23 power
|
| 27 |
+
|
| 28 |
+
And the Agent would respond in the end with "Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896." A detailed walk-through follows below.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Motivation
|
| 32 |
+
|
| 33 |
+
With an Agent, users can combine multiple LLMs and tools, so that they can build a truly powerful app. They can use an LLM in a loop to answer more complex questions than with ExtractiveQA or GenerativeQA. With an Agent and a tool for web search, Haystack is not limited to extracting answers from a document store or generating answers based on model weights anymore but it can use the knowledge it retrieves on-the-fly from the web. Thereby, the model's knowledge does not get outdated.
|
| 34 |
+
|
| 35 |
+
In future, we envision that an Agent could use tools not only for retrieving knowledge but also for interacting with the world. For example, it could periodically skim through newly opened issues in Haystack's GitHub repository. If there is a question that can be answered based on documentation then the Agent could retrieve relevant pages from the documentation, generate an answer and post it as a first response to the issue.
|
| 36 |
+
|
| 37 |
+
# Detailed design
|
| 38 |
+
|
| 39 |
+
**Glossary**
|
| 40 |
+
- Thought: First part of a prompt generated by Agent that serves to break down the query into a plan, for example, what part of the question needs to be answered first.
|
| 41 |
+
- Action (or tool): Actions/tools are Haystack pipelines or nodes that the Agent can use to answer a question. We use tool and action interchangeably in this proposal until we decide on the best naming. Choosing a tool in each iteration is the center part of a prompt generated by Agent.
|
| 42 |
+
- Action input: Last part of a prompt generated by an Agent. It serves as the input to a tool that the Agent uses to answer a question.
|
| 43 |
+
- Observation: The output generated by a tool and sent back to the Agent.
|
| 44 |
+
|
| 45 |
+
The Agent consists of a PromptNode that generates thoughts, chooses actions, and generates action inputs.
|
| 46 |
+
Just like Haystack pipelines, an Agent can be loaded from a YAML file. That YAML file must also contain the tools of the Agent defined as pipelines or nodes.
|
| 47 |
+
Tools need to be added to an Agent so that it can use them, just like nodes need to be added to pipelines.
|
| 48 |
+
When a tool is added to an Agent, a description of the tools needs to be added so that the LLM knows when it is useful.
|
| 49 |
+
|
| 50 |
+
A key functionality of the Agent is that it can act iteratively and use any of the pre-defined tools as many times as it wants based on the input query and the results returned from the tools used earlier. In every iteration, it chooses one of the tools and generates the input for that tool dynamically.
|
| 51 |
+
An example application of this is MultiHopQA, where multiple subquestions need to be answered step-by-step.
|
| 52 |
+
For the example query "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?", the MRKLPipeline needs to answer several subquestions. Here is an example of a full transcript of the prompt input and generated output:
|
| 53 |
+
|
| 54 |
+
```
|
| 55 |
+
Answer the following questions as best as you can. You have access to the following tools:
|
| 56 |
+
|
| 57 |
+
Search: useful for when you need to answer questions about current events. You should ask targeted questions
|
| 58 |
+
Calculator: useful for when you need to answer questions about math
|
| 59 |
+
|
| 60 |
+
Use the following format:
|
| 61 |
+
|
| 62 |
+
Question: the input question you must answer
|
| 63 |
+
Thought: you should always think about what to do
|
| 64 |
+
Action: the action to take, should be one of [Search, Calculator]
|
| 65 |
+
Action Input: the input to the action
|
| 66 |
+
Observation: the result of the action
|
| 67 |
+
... (this Thought/Action/Action Input/Observation can repeat N times)
|
| 68 |
+
Thought: I now know the final answer
|
| 69 |
+
Final Answer: the final answer to the original input question
|
| 70 |
+
|
| 71 |
+
Begin!
|
| 72 |
+
Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
|
| 73 |
+
Thought: I need to do some research to answer this question.
|
| 74 |
+
Action: Search
|
| 75 |
+
Action Input: Olivia Wilde's boyfriend
|
| 76 |
+
Observation: First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016.
|
| 77 |
+
Thought: I need to find out his age
|
| 78 |
+
Action: Search
|
| 79 |
+
Action Input: Jason Sudeikis age
|
| 80 |
+
Observation: 47 years
|
| 81 |
+
Thought: I need to raise it to the 0.23 power
|
| 82 |
+
Action: Calculator
|
| 83 |
+
Action Input: 47^0.23
|
| 84 |
+
Observation: 2.4242784855673896
|
| 85 |
+
Thought: I now know the final answer
|
| 86 |
+
Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
## Agent steps breakdown
|
| 90 |
+
The above steps represent the entire action trace for the Agent. However, let's break it down into individual agent steps so we can understand how it makes decisions, chooses actions and action inputs.
|
| 91 |
+
|
| 92 |
+
### Step 1:
|
| 93 |
+
|
| 94 |
+
We start with a prompt where we instruct LLM on what we want. The first prompt we send to LLM is the following:
|
| 95 |
+
```
|
| 96 |
+
Answer the following questions as best as you can. You have access to the following tools:
|
| 97 |
+
|
| 98 |
+
Search: useful for when you need to answer questions about current events. You should ask targeted questions
|
| 99 |
+
Calculator: useful for when you need to answer questions about math
|
| 100 |
+
|
| 101 |
+
Use the following format:
|
| 102 |
+
|
| 103 |
+
Question: the input question you must answer
|
| 104 |
+
Thought: you should always think about what to do
|
| 105 |
+
Action: the action to take, should be one of [Search, Calculator]
|
| 106 |
+
Action Input: the input to the action
|
| 107 |
+
Observation: the result of the action
|
| 108 |
+
... (this Thought/Action/Action Input/Observation can repeat N times)
|
| 109 |
+
Thought: I now know the final Answer
|
| 110 |
+
Final Answer: the final Answer to the original input question
|
| 111 |
+
|
| 112 |
+
Begin!
|
| 113 |
+
Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
|
| 114 |
+
Thought:
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
Notice how we finish the prompt with the `Thought:` token, priming the model to start its generation of an actual plan of what needs to be done in the first step.
|
| 118 |
+
LLM would also generate `Action:` and `Action Input:` rows of this step which help us select an Action to execute and the input for that action.
|
| 119 |
+
As we also instruct the model to stop generating a response with stop words being `Observation:` the model response for this step is:
|
| 120 |
+
|
| 121 |
+
```
|
| 122 |
+
I need to do some research to answer this question.
|
| 123 |
+
Action: Search
|
| 124 |
+
Action Input: Olivia Wilde's boyfriend
|
| 125 |
+
```
|
| 126 |
+
|
| 127 |
+
At this point, we invoke Search (along with the input) and receive the response from the Search tool: "First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016."
|
| 128 |
+
|
| 129 |
+
We append the tool response under the `Observation:`
|
| 130 |
+
|
| 131 |
+
LLM generation above and the response from the Search action (added under Observation) are appended to the initial prompt.
|
| 132 |
+
|
| 133 |
+
### Step 2:
|
| 134 |
+
|
| 135 |
+
We start this step with the following prompt:
|
| 136 |
+
|
| 137 |
+
```
|
| 138 |
+
Answer the following questions as best as you can. You have access to the following tools:
|
| 139 |
+
|
| 140 |
+
Search: useful for when you need to answer questions about current events. You should ask targeted questions
|
| 141 |
+
Calculator: useful for when you need to answer questions about math
|
| 142 |
+
|
| 143 |
+
Use the following format:
|
| 144 |
+
|
| 145 |
+
Question: the input question you must answer
|
| 146 |
+
Thought: you should always think about what to do
|
| 147 |
+
Action: the action to take, should be one of [Search, Calculator]
|
| 148 |
+
Action Input: the input to the action
|
| 149 |
+
Observation: the result of the action
|
| 150 |
+
... (this Thought/Action/Action Input/Observation can repeat N times)
|
| 151 |
+
Thought: I now know the final Answer
|
| 152 |
+
Final Answer: the final Answer to the original input question
|
| 153 |
+
|
| 154 |
+
Begin!
|
| 155 |
+
Question: Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?
|
| 156 |
+
Thought: I need to do some research to answer this question.
|
| 157 |
+
Action: Search
|
| 158 |
+
Action Input: Olivia Wilde's boyfriend
|
| 159 |
+
Observation: First linked in November 2011, Wilde and Sudeikis got engaged in January 2013. They later became parents, welcoming son Otis in 2014 and daughter Daisy in 2016.
|
| 160 |
+
Thought:
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
Again, notice how we've added the response from LLM and the `Observation` from the tool to the prompt, and we finish the prompt with `Thought:` token, priming the model to start the response with the plan for this step. As in the previous step, the model generates an action plan and selects an action and its input. The LLM response is:
|
| 164 |
+
|
| 165 |
+
```
|
| 166 |
+
I need to find out his age
|
| 167 |
+
Action: Search
|
| 168 |
+
Action Input: Jason Sudeikis age
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
This LLM response above gives us enough information to invoke a Search tool again along with the appropriate input, and we receive the response from the Search: 47 years. We add this response to the prompt history as the `Observation:` keyword.
|
| 172 |
+
|
| 173 |
+
### Step 3:
|
| 174 |
+
|
| 175 |
+
For the sake of brevity, let's not list the entire prompt again. The critical part to remember is that we append the output of step 2 to the prompt history we are creating as we step through each agent step. These so-called reasoning traces help agents "understand" what needs to be done in each successive step.
|
| 176 |
+
The last part of the prompt is the following:
|
| 177 |
+
```
|
| 178 |
+
Thought: I need to find out his age
|
| 179 |
+
Action: Search
|
| 180 |
+
Action Input: Jason Sudeikis age
|
| 181 |
+
Observation: 47 years
|
| 182 |
+
Thought:
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
The LLM-generated response is:
|
| 186 |
+
|
| 187 |
+
```
|
| 188 |
+
I need to raise it to the 0.23 power
|
| 189 |
+
Action: Calculator
|
| 190 |
+
Action Input: 47^0.23
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
In this step, we invoke a new tool - The calculator with specified input. The calculator response is 2.4242784855673896
|
| 194 |
+
We added the calculator response to the prompt history under the Observation keyword.
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
### Step 4:
|
| 198 |
+
|
| 199 |
+
Again, we append a calculator response and prompt to the prompt history once again. Let's not list the entire prompt, but the last few lines:
|
| 200 |
+
```
|
| 201 |
+
I need to raise it to the 0.23 power
|
| 202 |
+
Action: Calculator
|
| 203 |
+
Action Input: 47^0.23
|
| 204 |
+
Observation: 2.4242784855673896
|
| 205 |
+
Thought:
|
| 206 |
+
```
|
| 207 |
+
|
| 208 |
+
The LLM-generated response is:
|
| 209 |
+
```
|
| 210 |
+
I now know the final answer
|
| 211 |
+
Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.
|
| 212 |
+
```
|
| 213 |
+
|
| 214 |
+
Using simple string parsing, we can detect that the mode in this step responded with the "Final Answer:" keyword just as we instructed, thus breaking out of the loop and completing the agent's task with a response returned to the agent's client.
|
| 215 |
+
In the rare case that "Final Answer:" is not generated even after many iterations, we can break out of the loop based on a maximum number of iterations allowed. Thereby, we can prevent an infinite loop.
|
| 216 |
+
|
| 217 |
+
## Agent Creation
|
| 218 |
+
|
| 219 |
+
The Agent can be either created programmatically or loaded from a YAML file.
|
| 220 |
+
In the following example, one tool is a node for searching the web. The other tool is a pipeline for doing calculations in python.
|
| 221 |
+
|
| 222 |
+
**Example programmatic creation:**
|
| 223 |
+
```python
|
| 224 |
+
search = SerpAPIComponent(api_key=os.environ.get("SERPAPI_API_KEY"), name="Serp", inputs=["Query"])
|
| 225 |
+
|
| 226 |
+
prompt_model=PromptModel(model_name_or_path="text-davinci-003", api_key=os.environ.get("OPENAI_API_KEY"))
|
| 227 |
+
|
| 228 |
+
calculator = Pipeline()
|
| 229 |
+
calculator.add_node(PromptNode(
|
| 230 |
+
model_name_or_path=prompt_model,
|
| 231 |
+
default_prompt_template=PromptTemplate(prompt_text="Write a simple python function that calculates..."),
|
| 232 |
+
output_variable="python_runtime_input") # input
|
| 233 |
+
calculator.add_node(PythonRuntime()) # actual calculator
|
| 234 |
+
|
| 235 |
+
prompt_node = PromptNode(
|
| 236 |
+
model_name_or_path=prompt_model,
|
| 237 |
+
stop_words=["Observation:"]
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
agent = Agent(prompt_node=prompt_node)
|
| 241 |
+
# Nodes and pipelines can be added as tools to the agent. Just as nodes can be added to pipelines with add_node()
|
| 242 |
+
agent.add_tool("Search", search, "useful for when you need to answer questions about current events. You should ask targeted questions")
|
| 243 |
+
agent.add_tool("Calculator", calculator, "useful for when you need to answer questions about math")
|
| 244 |
+
|
| 245 |
+
result = agent.run("What is 2 to the power of 3?")
|
| 246 |
+
```
|
| 247 |
+
|
| 248 |
+
**Example YAML file:**
|
| 249 |
+
```yaml
|
| 250 |
+
version: ignore
|
| 251 |
+
|
| 252 |
+
components:
|
| 253 |
+
- name: AgentPromptNode
|
| 254 |
+
type: PromptNode
|
| 255 |
+
params:
|
| 256 |
+
model_name_or_path: DavinciModel
|
| 257 |
+
stop_words: ['Observation:']
|
| 258 |
+
- name: DavinciModel
|
| 259 |
+
type: PromptModel
|
| 260 |
+
params:
|
| 261 |
+
model_name_or_path: 'text-davinci-003'
|
| 262 |
+
api_key: 'XYZ'
|
| 263 |
+
- name: Serp
|
| 264 |
+
type: SerpAPIComponent
|
| 265 |
+
params:
|
| 266 |
+
api_key: 'XYZ'
|
| 267 |
+
- name: CalculatorInput
|
| 268 |
+
type: PromptNode
|
| 269 |
+
params:
|
| 270 |
+
model_name_or_path: DavinciModel
|
| 271 |
+
default_prompt_template: CalculatorTemplate
|
| 272 |
+
output_variable: python_runtime_input
|
| 273 |
+
- name: Calculator
|
| 274 |
+
type: PythonRuntime
|
| 275 |
+
- name: CalculatorTemplate
|
| 276 |
+
type: PromptTemplate
|
| 277 |
+
params:
|
| 278 |
+
name: calculator
|
| 279 |
+
prompt_text: |
|
| 280 |
+
# Write a simple python function that calculates
|
| 281 |
+
# $query
|
| 282 |
+
# Do not print the result; invoke the function and assign the result to final_result variable
|
| 283 |
+
# Start with import statement
|
| 284 |
+
|
| 285 |
+
pipelines:
|
| 286 |
+
- name: calculator_pipeline
|
| 287 |
+
nodes:
|
| 288 |
+
- name: CalculatorInput
|
| 289 |
+
inputs: [Query]
|
| 290 |
+
- name: Calculator
|
| 291 |
+
inputs: [CalculatorInput]
|
| 292 |
+
|
| 293 |
+
agents:
|
| 294 |
+
- name: agent
|
| 295 |
+
params:
|
| 296 |
+
prompt_node: AgentPromptNode
|
| 297 |
+
tools:
|
| 298 |
+
- name: Search
|
| 299 |
+
pipeline_or_node: Serp
|
| 300 |
+
description: >
|
| 301 |
+
useful for when you need to answer questions about current events.
|
| 302 |
+
You should ask targeted questions
|
| 303 |
+
- name: Calculator
|
| 304 |
+
pipeline_or_node: calculator_pipeline
|
| 305 |
+
description: >
|
| 306 |
+
useful for when you need to answer questions about math
|
| 307 |
+
```
|
| 308 |
+
|
| 309 |
+
and loading from the YAML file into an Agent:
|
| 310 |
+
|
| 311 |
+
```python
|
| 312 |
+
agent = Agent.load_from_yaml(
|
| 313 |
+
"test.mrkl.haystack-pipeline.yml", agent_name="agent"
|
| 314 |
+
)
|
| 315 |
+
```
|
| 316 |
+
|
| 317 |
+
Pipelines, agents, nodes, and tools all implement `run` and `run_batch` methods, which is the minimal contract.
|
| 318 |
+
At the moment, tools are either pipelines or nodes but we can imagine more types of tools as long as they implement that minimal contract.
|
| 319 |
+
|
| 320 |
+
# Drawbacks
|
| 321 |
+
|
| 322 |
+
Although the scope of the initial Agent is limited, it can grow into a full-fledged framework consisting of
|
| 323 |
+
various types of agents (conversation, Robotic Process Automation etc.). The field of agents is rapidly growing, and we should be aware that it can even outgrow Haystack in the future. Perhaps we can start with the Agent being part of Haystack and potentially create a new project in the future.
|
| 324 |
+
|
| 325 |
+
One of the central building blocks of an Agent is the PromptNode together with a set of "neural attachments" extending the agent's capabilities.
|
| 326 |
+
Many tools like Search, Calculator, Notion and API connectors are somewhat different conceptually from the existing Haystack components.
|
| 327 |
+
On the other hand, some of the existing Haystack components fit naturally into the framework of tools, for example, DocumentStore, Retriever, and Reader.
|
| 328 |
+
|
| 329 |
+
There is a non-negligible potential for a growing implementation cost of such an agent framework that might
|
| 330 |
+
stretch resources away from the existing Haystack core. However, as LLM-based agents are an exciting and
|
| 331 |
+
rapidly growing field, they may raise Haystack awareness significantly.
|
| 332 |
+
|
| 333 |
+
# Alternatives
|
| 334 |
+
|
| 335 |
+
We have considered an alternative design where the Agent is just another node or a pipeline.
|
| 336 |
+
However, we decided to introduce it as a separate concept because of user-friendliness / clear code.
|
| 337 |
+
While a Pipeline is a collection of Nodes, an Agent is a collection of Pipelines.
|
| 338 |
+
Nodes in a pipeline have a pre-defined execution order, whereas the execution order of Pipelines in an Agent is chosen at runtime by an LLM.
|
| 339 |
+
|
| 340 |
+
Regarding the name *Agent*, we considered several alternatives and prefer *Agent* for its simplicity.
|
| 341 |
+
Alternative names:
|
| 342 |
+
- MRKLAgent
|
| 343 |
+
- LLMOrchestrator
|
| 344 |
+
- LLMChain
|
| 345 |
+
- Toolchain (fits nicely with tools and [toolchains](https://en.wikipedia.org/wiki/Toolchain) in software)
|
| 346 |
+
- PipelineComposer / LLMComposer
|
| 347 |
+
- PipelineComposition / LLMComposition
|
| 348 |
+
- Interesting naming tidbits:
|
| 349 |
+
- MRKL [paper](https://arxiv.org/pdf/2205.00445.pdf) never uses word agent, only system
|
| 350 |
+
- ReAct [paper](https://arxiv.org/pdf/2210.03629.pdf) uses agent almost exclusively
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
# Adoption strategy
|
| 354 |
+
|
| 355 |
+
Introducing the Agent concept is a rather big change that would require a careful adoption strategy. We would need a lot more documentation explaining these new concepts, and each attached tool would need additional documentation.
|
| 356 |
+
|
| 357 |
+
However, the existing Haystack users, especially advanced users, have already requested an agent framework to be added as part of Haystack. We anticipate that advanced users will be the first to adopt the Agent.
|
| 358 |
+
|
| 359 |
+
Using an Agent requires an OpenAI API key, and some tools require additional API keys (for example, SerpAPI), but free trials are available.
|
| 360 |
+
|
| 361 |
+
The debugging output of the Agent will help users to better understand how it works. In a debugger, the agent works as any other Haystack pipeline containing a prompt node.
|
| 362 |
+
|
| 363 |
+
# How we teach this
|
| 364 |
+
|
| 365 |
+
Yes, adding agents to Haystack would require a lot of documentation changes. Perhaps even a separate documentation for MRKL and other future agents somewhat detached from Haystack.
|
| 366 |
+
|
| 367 |
+
We can teach existing Haystack users about agents and agent tools in a new section of the documentation. We can also organize Discord office hours, tutorials, and webinars to teach the new concepts.
|
| 368 |
+
|
| 369 |
+
# Unresolved questions
|
| 370 |
+
|
| 371 |
+
**Name of the parameter pipeline_or_node**
|
| 372 |
+
* When we add a tool to the agent, we need to specify the name of the pipeline or node (component) to add.
|
| 373 |
+
This parameter could be called `pipeline_or_node` or `pipeline_or_component_name` etc.
|
| 374 |
+
|
| 375 |
+
**Umbrella Term for Pipeline and Agent**
|
| 376 |
+
* We need a term that captures pipelines and agents for communication with users (NLP application, flow, system, service, engine ...).
|
| 377 |
+
Let's have that conversation separately from this proposal.
|
| 378 |
+
|
| 379 |
+
**Tools we imagine in the near future**
|
| 380 |
+
* Tools will be discussed in a separate proposal.
|
testbed/deepset-ai__haystack/proposals/text/3959-json-converter.md
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
- Title: Addition of a `JsonConverter` node
|
| 2 |
+
- Decision driver: @bglearning
|
| 3 |
+
- Start Date: 2023-01-26
|
| 4 |
+
- Proposal PR: #3959
|
| 5 |
+
|
| 6 |
+
# Summary
|
| 7 |
+
|
| 8 |
+
Right now we don't have a node that can take JSON files as input to be fed into a pipeline.
|
| 9 |
+
|
| 10 |
+
Proposal: Add a `JsonConverter` node that takes in a json file, parses it, and generates `Document`s.
|
| 11 |
+
It would also support the `jsonl` format with one line corresponding to one document.
|
| 12 |
+
|
| 13 |
+
# Basic example
|
| 14 |
+
|
| 15 |
+
```python
|
| 16 |
+
from haystack.nodes import JsonConverter
|
| 17 |
+
|
| 18 |
+
converter = JsonConverter()
|
| 19 |
+
|
| 20 |
+
# Receive back List[Document]
|
| 21 |
+
docs = converter.convert("data_file.json")
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
With the `data_file.json` as a list of json representation of documents:
|
| 25 |
+
|
| 26 |
+
```json
|
| 27 |
+
[
|
| 28 |
+
{
|
| 29 |
+
"content": "...",
|
| 30 |
+
"content_type": "text", "meta": {...}
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"content": [["h1", "h2"], ["val1", "val2"]],
|
| 34 |
+
"content_type": "table", "meta": {...}
|
| 35 |
+
}
|
| 36 |
+
]
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
Alternatively, the data can also be `jsonl`.
|
| 40 |
+
By default, the converter will try to auto-detect between `json` and `jsonl`.
|
| 41 |
+
|
| 42 |
+
The main use case would be to be able to include this directly in the YAML specification
|
| 43 |
+
|
| 44 |
+
```yaml
|
| 45 |
+
...
|
| 46 |
+
|
| 47 |
+
pipelines:
|
| 48 |
+
- name: indexing
|
| 49 |
+
nodes:
|
| 50 |
+
- name: JsonConverter
|
| 51 |
+
inputs: [File]
|
| 52 |
+
- name: Retriever
|
| 53 |
+
inputs: [JsonConverter]
|
| 54 |
+
- name: DocumentStore
|
| 55 |
+
inputs: [Retriever]
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
# Motivation
|
| 59 |
+
|
| 60 |
+
Users may want to do some processing of the data themselves, persist it somehow, and only then pass it onto a haystack pipeline (for instance, by uploading into the REST API endpoint). Ideally this would happen without the need to create a custom endpoint.
|
| 61 |
+
|
| 62 |
+
For much of this processing, JSON is a convenient intermediate format as it allows for things like specifying the metadata.
|
| 63 |
+
|
| 64 |
+
Specifically, one use-case that has come up for a team using haystack: they want to use a PDF parser (for tables) currently not in haystack. As such, they want to handle the parsing themselves outside of haystack, put the parsed result into a json file, and then pass it onto a haystack API endpoint.
|
| 65 |
+
|
| 66 |
+
Having a `JsonConverter` node would allow users to set up a Haystack pipeline to ingest such data without the user having to create a custom node for it.
|
| 67 |
+
|
| 68 |
+
# Detailed design
|
| 69 |
+
|
| 70 |
+
The converter would primarily be a wrapper around `Document.from_dict`.
|
| 71 |
+
|
| 72 |
+
The schema accepted would be a list of JSON dictionaries representing Documents.
|
| 73 |
+
So, the following, with `content` being the only compulsory field.
|
| 74 |
+
|
| 75 |
+
```
|
| 76 |
+
[
|
| 77 |
+
{
|
| 78 |
+
"content": str or list[list],
|
| 79 |
+
"content_type": str,
|
| 80 |
+
"meta": dict,
|
| 81 |
+
"id_hash_keys": list,
|
| 82 |
+
"score": float,
|
| 83 |
+
"embedding": array
|
| 84 |
+
},
|
| 85 |
+
...
|
| 86 |
+
]
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
```python
|
| 90 |
+
class JsonConverter(BaseConverter):
|
| 91 |
+
def __init__(self, ...):
|
| 92 |
+
...
|
| 93 |
+
|
| 94 |
+
def convert(
|
| 95 |
+
self,
|
| 96 |
+
file_path: Path,
|
| 97 |
+
meta: Optional[Dict[str, str]] = None,
|
| 98 |
+
encoding: Optional[str] = "UTF-8",
|
| 99 |
+
id_hash_keys: Optional[List[str]] = None,
|
| 100 |
+
...
|
| 101 |
+
) -> List[Document]:
|
| 102 |
+
if id_hash_keys is None:
|
| 103 |
+
id_hash_keys = self.id_hash_keys
|
| 104 |
+
|
| 105 |
+
documents = []
|
| 106 |
+
with open(file_path, encoding=encoding, errors="ignore") as f:
|
| 107 |
+
data = json.load(f)
|
| 108 |
+
for doc_dict in data:
|
| 109 |
+
doc_dict = dict(doc_dict)
|
| 110 |
+
doc_dict['id_hash_keys'] = id_hash_keys
|
| 111 |
+
doc_dict['meta'] = doc_dict.get('meta', dict())
|
| 112 |
+
|
| 113 |
+
if meta:
|
| 114 |
+
doc_dict['meta'].update(meta)
|
| 115 |
+
|
| 116 |
+
documents.append(Document.from_dict(doc_dict))
|
| 117 |
+
|
| 118 |
+
return documents
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
# Drawbacks
|
| 122 |
+
|
| 123 |
+
- It would add another node that needs to be maintained and documented.
|
| 124 |
+
|
| 125 |
+
# Alternatives
|
| 126 |
+
|
| 127 |
+
- This node could be created as a custom node for the particular application where it is required. But could be better to have it out-of-the-box.
|
| 128 |
+
- Design Alternative: Also, provide options to map custom fields to `Document` fields (E.g. {"review": "content"}) which could make this node a bit more flexible and might mean the user doesn't have to do some pre-formatting beforehand. But this can be a future development.
|
| 129 |
+
|
| 130 |
+
# Adoption strategy
|
| 131 |
+
|
| 132 |
+
It doesn't introduce a breaking change and wouldn't require changes in existing pipelines.
|
| 133 |
+
|
| 134 |
+
# How we teach this
|
| 135 |
+
|
| 136 |
+
It would be good to have this be part of the Guide (perhaps under File Converters).
|
| 137 |
+
|
| 138 |
+
Could also be mentioned in one of the tutorials. For instance, in the preprocessing tutorial where we say "Haystack expects data to be provided as a list of documents in the following dictionary format".
|
| 139 |
+
|
| 140 |
+
# Unresolved questions
|
| 141 |
+
|
| 142 |
+
- Should the `content_type` allowed be restricted (e.g. only "text" and "table"). And relatedly, should the name be more specific? E.g. `JsonTableTextConverter` rather than `JsonConverter`. Currently leaning towards no restriction and the `JsonConverter` name.
|